var/home/core/zuul-output/logs/kubelet.log
Nov 25 15:17:16 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 15:17:16 crc restorecon[4758]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 15:17:16 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 15:17:17 crc restorecon[4758]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc 
restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 15:17:17 crc 
restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:17 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:17 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 
15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 15:17:18 crc 
restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc 
restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 15:17:18 crc restorecon[4758]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 25 15:17:19 crc kubenswrapper[4800]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 15:17:19 crc kubenswrapper[4800]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 25 15:17:19 crc kubenswrapper[4800]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 15:17:19 crc kubenswrapper[4800]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
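The restorecon pass above walks /var/lib/kubelet and leaves alone every file whose SELinux context an admin has customized, reporting each one as "not reset as customized by admin to <context>"; only genuinely mislabeled files (such as config.json and the kubenswrapper binary) are actually relabeled. A minimal sketch, assuming Linux and the golang.org/x/sys/unix package, of reading the label restorecon is inspecting; the path is one taken from the log and is illustrative only:

    // selinuxlabel.go - print the SELinux context stored in a file's
    // security.selinux extended attribute. Minimal sketch; assumes Linux
    // and golang.org/x/sys/unix.
    package main

    import (
        "fmt"
        "os"
        "strings"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Path taken from the log above; any file of interest works.
        path := "/var/lib/kubelet/config.json"

        // SELinux stores a file's context in the security.selinux xattr.
        buf := make([]byte, 256)
        n, err := unix.Getxattr(path, "security.selinux", buf)
        if err != nil {
            fmt.Fprintf(os.Stderr, "getxattr %s: %v\n", path, err)
            os.Exit(1)
        }
        // The value is a NUL-terminated context string such as
        // system_u:object_r:container_var_lib_t:s0.
        ctx := strings.TrimRight(string(buf[:n]), "\x00")
        fmt.Printf("%s -> %s\n", path, ctx)
    }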
Nov 25 15:17:19 crc kubenswrapper[4800]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 25 15:17:19 crc kubenswrapper[4800]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.447584 4800 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458009 4800 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458051 4800 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458060 4800 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458066 4800 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458071 4800 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458076 4800 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458081 4800 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458085 4800 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458090 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458095 4800 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458100 4800 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458104 4800 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458109 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458121 4800 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458125 4800 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458129 4800 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458135 4800 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
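The Flag ... has been deprecated notices above all point at the same migration path: set these values through the file passed to --config rather than on the command line. A minimal sketch of the corresponding kubelet.config.k8s.io/v1beta1 stanza; the field names follow the upstream KubeletConfiguration schema implied by those notices and should be verified against the kubelet version in use, and every value (socket path, plugin dir, taint, reservations, thresholds) is an illustrative assumption, not what this node runs:

    // kubeletconfig.go - sketch of moving the deprecated flags seen above
    // into a KubeletConfiguration file, then parsing it to show the
    // document is well-formed. Field names are assumptions drawn from the
    // deprecation notices; values are placeholders.
    package main

    import (
        "fmt"

        "sigs.k8s.io/yaml"
    )

    const kubeletConfig = `
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # replaces --container-runtime-endpoint
    containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
    # replaces --volume-plugin-dir (illustrative path)
    volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec
    # replaces --register-with-taints (illustrative taint)
    registerWithTaints:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    # replaces --system-reserved (illustrative reservations)
    systemReserved:
      cpu: 500m
      memory: 1Gi
    # replaces --minimum-container-ttl-duration, per the notice above
    evictionHard:
      memory.available: 100Mi
    `

    func main() {
        var cfg map[string]interface{}
        if err := yaml.Unmarshal([]byte(kubeletConfig), &cfg); err != nil {
            panic(err)
        }
        fmt.Printf("parsed %d top-level fields, kind=%v\n", len(cfg), cfg["kind"])
    }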
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458141 4800 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458145 4800 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458150 4800 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458155 4800 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458160 4800 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458164 4800 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458170 4800 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458176 4800 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458180 4800 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458185 4800 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458189 4800 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458193 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458198 4800 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458203 4800 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458208 4800 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458212 4800 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458217 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458221 4800 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458225 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458231 4800 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458236 4800 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458241 4800 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458245 4800 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458250 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458254 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458259 4800 
feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458263 4800 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458268 4800 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458272 4800 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458277 4800 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458281 4800 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458284 4800 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458289 4800 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458293 4800 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458297 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458301 4800 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458305 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458309 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458315 4800 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458320 4800 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458324 4800 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458329 4800 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458333 4800 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458337 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458341 4800 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458345 4800 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458350 4800 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458354 4800 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458358 4800 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458363 4800 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458368 4800 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458373 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458378 4800 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.458383 4800 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458484 4800 flags.go:64] FLAG: --address="0.0.0.0" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458497 4800 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458528 4800 flags.go:64] FLAG: --anonymous-auth="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458536 4800 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458547 4800 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458552 4800 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458561 4800 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458569 4800 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458574 4800 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458580 4800 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458586 4800 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458591 4800 flags.go:64] FLAG: 
--cert-dir="/var/lib/kubelet/pki" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458596 4800 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458601 4800 flags.go:64] FLAG: --cgroup-root="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458606 4800 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458611 4800 flags.go:64] FLAG: --client-ca-file="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458616 4800 flags.go:64] FLAG: --cloud-config="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458621 4800 flags.go:64] FLAG: --cloud-provider="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458626 4800 flags.go:64] FLAG: --cluster-dns="[]" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458632 4800 flags.go:64] FLAG: --cluster-domain="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458637 4800 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458642 4800 flags.go:64] FLAG: --config-dir="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458647 4800 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458652 4800 flags.go:64] FLAG: --container-log-max-files="5" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458676 4800 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458681 4800 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458686 4800 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458701 4800 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458708 4800 flags.go:64] FLAG: --contention-profiling="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458713 4800 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458717 4800 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458722 4800 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458727 4800 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458734 4800 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458739 4800 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458746 4800 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458752 4800 flags.go:64] FLAG: --enable-load-reader="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458757 4800 flags.go:64] FLAG: --enable-server="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458763 4800 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458779 4800 flags.go:64] FLAG: --event-burst="100" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458784 4800 flags.go:64] FLAG: --event-qps="50" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458789 4800 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 25 15:17:19 
crc kubenswrapper[4800]: I1125 15:17:19.458794 4800 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458799 4800 flags.go:64] FLAG: --eviction-hard="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458806 4800 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458811 4800 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458816 4800 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458821 4800 flags.go:64] FLAG: --eviction-soft="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458826 4800 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458831 4800 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458836 4800 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458882 4800 flags.go:64] FLAG: --experimental-mounter-path="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458887 4800 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458892 4800 flags.go:64] FLAG: --fail-swap-on="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458897 4800 flags.go:64] FLAG: --feature-gates="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458907 4800 flags.go:64] FLAG: --file-check-frequency="20s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458912 4800 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458929 4800 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458935 4800 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458941 4800 flags.go:64] FLAG: --healthz-port="10248" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458949 4800 flags.go:64] FLAG: --help="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458954 4800 flags.go:64] FLAG: --hostname-override="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458959 4800 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458964 4800 flags.go:64] FLAG: --http-check-frequency="20s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458976 4800 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458981 4800 flags.go:64] FLAG: --image-credential-provider-config="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458986 4800 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458992 4800 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.458997 4800 flags.go:64] FLAG: --image-service-endpoint="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459002 4800 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459007 4800 flags.go:64] FLAG: --kube-api-burst="100" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459012 4800 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 25 
15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459029 4800 flags.go:64] FLAG: --kube-api-qps="50" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459034 4800 flags.go:64] FLAG: --kube-reserved="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459040 4800 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459045 4800 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459050 4800 flags.go:64] FLAG: --kubelet-cgroups="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459055 4800 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459060 4800 flags.go:64] FLAG: --lock-file="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459064 4800 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459069 4800 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459075 4800 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459083 4800 flags.go:64] FLAG: --log-json-split-stream="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459088 4800 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459093 4800 flags.go:64] FLAG: --log-text-split-stream="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459098 4800 flags.go:64] FLAG: --logging-format="text" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459103 4800 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459109 4800 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459114 4800 flags.go:64] FLAG: --manifest-url="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459119 4800 flags.go:64] FLAG: --manifest-url-header="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459126 4800 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459131 4800 flags.go:64] FLAG: --max-open-files="1000000" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459139 4800 flags.go:64] FLAG: --max-pods="110" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459144 4800 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459149 4800 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459154 4800 flags.go:64] FLAG: --memory-manager-policy="None" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459159 4800 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459163 4800 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459169 4800 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459175 4800 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459190 4800 flags.go:64] FLAG: --node-status-max-images="50" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459195 4800 
flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459212 4800 flags.go:64] FLAG: --oom-score-adj="-999" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459217 4800 flags.go:64] FLAG: --pod-cidr="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459222 4800 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459232 4800 flags.go:64] FLAG: --pod-manifest-path="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459237 4800 flags.go:64] FLAG: --pod-max-pids="-1" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459242 4800 flags.go:64] FLAG: --pods-per-core="0" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459247 4800 flags.go:64] FLAG: --port="10250" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459252 4800 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459258 4800 flags.go:64] FLAG: --provider-id="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459263 4800 flags.go:64] FLAG: --qos-reserved="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459268 4800 flags.go:64] FLAG: --read-only-port="10255" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459274 4800 flags.go:64] FLAG: --register-node="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459279 4800 flags.go:64] FLAG: --register-schedulable="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459283 4800 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459293 4800 flags.go:64] FLAG: --registry-burst="10" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459298 4800 flags.go:64] FLAG: --registry-qps="5" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459303 4800 flags.go:64] FLAG: --reserved-cpus="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459308 4800 flags.go:64] FLAG: --reserved-memory="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459316 4800 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459321 4800 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459326 4800 flags.go:64] FLAG: --rotate-certificates="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459332 4800 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459337 4800 flags.go:64] FLAG: --runonce="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459342 4800 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459347 4800 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459352 4800 flags.go:64] FLAG: --seccomp-default="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459356 4800 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459361 4800 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459366 4800 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 
15:17:19.459371 4800 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459377 4800 flags.go:64] FLAG: --storage-driver-password="root" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459383 4800 flags.go:64] FLAG: --storage-driver-secure="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459388 4800 flags.go:64] FLAG: --storage-driver-table="stats" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459393 4800 flags.go:64] FLAG: --storage-driver-user="root" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459398 4800 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459404 4800 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459409 4800 flags.go:64] FLAG: --system-cgroups="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459414 4800 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459422 4800 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459428 4800 flags.go:64] FLAG: --tls-cert-file="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459433 4800 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459439 4800 flags.go:64] FLAG: --tls-min-version="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459444 4800 flags.go:64] FLAG: --tls-private-key-file="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459449 4800 flags.go:64] FLAG: --topology-manager-policy="none" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459454 4800 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459459 4800 flags.go:64] FLAG: --topology-manager-scope="container" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459464 4800 flags.go:64] FLAG: --v="2" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459472 4800 flags.go:64] FLAG: --version="false" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459480 4800 flags.go:64] FLAG: --vmodule="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459487 4800 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.459492 4800 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459624 4800 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459631 4800 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459636 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459640 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459645 4800 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459649 4800 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459653 4800 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 15:17:19 crc 
kubenswrapper[4800]: W1125 15:17:19.459657 4800 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459662 4800 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459667 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459671 4800 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459676 4800 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459680 4800 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459684 4800 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459688 4800 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459692 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459697 4800 feature_gate.go:330] unrecognized feature gate: Example Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459704 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459728 4800 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459734 4800 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459740 4800 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459744 4800 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459749 4800 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459754 4800 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459764 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459768 4800 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459773 4800 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459779 4800 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459783 4800 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459789 4800 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459793 4800 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459797 4800 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459801 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459805 4800 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459809 4800 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459813 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459817 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459821 4800 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459825 4800 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459829 4800 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459833 4800 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459837 4800 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459841 4800 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459863 4800 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459868 4800 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459872 4800 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459876 4800 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459880 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459884 4800 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459889 4800 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459893 4800 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459897 4800 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459901 4800 feature_gate.go:330] unrecognized 
feature gate: PrivateHostedZoneAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459905 4800 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459909 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459914 4800 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459922 4800 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459926 4800 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459930 4800 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459935 4800 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459940 4800 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459944 4800 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459948 4800 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459954 4800 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459959 4800 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459964 4800 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459968 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459972 4800 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459976 4800 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459980 4800 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.459984 4800 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.465696 4800 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.479112 4800 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.479163 4800 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479251 4800 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource 
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479259 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479298 4800 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479306 4800 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479311 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479315 4800 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479319 4800 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479323 4800 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479327 4800 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479331 4800 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479335 4800 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479339 4800 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479342 4800 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479346 4800 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479350 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479354 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479358 4800 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479363 4800 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479369 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479375 4800 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479379 4800 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479383 4800 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479388 4800 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479392 4800 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479396 4800 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479402 4800 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479407 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479411 4800 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479414 4800 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479419 4800 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479422 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479426 4800 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479430 4800 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479434 4800 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479440 4800 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479446 4800 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479451 4800 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479454 4800 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479458 4800 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479462 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479466 4800 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479470 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479473 4800 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479477 4800 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479480 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479483 4800 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479487 4800 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479490 4800 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479495 4800 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479500 4800 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479504 4800 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479507 4800 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479511 4800 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479515 4800 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479519 4800 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479523 4800 feature_gate.go:330] unrecognized feature gate: Example Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479528 4800 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479532 4800 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479536 4800 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479539 4800 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479543 4800 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479547 4800 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479551 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479554 4800 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479573 4800 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479579 4800 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479583 4800 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479595 4800 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479598 4800 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479602 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479607 4800 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.479614 4800 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true 
VolumeAttributesClass:false]} Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479861 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479916 4800 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479921 4800 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479926 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479929 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479933 4800 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479937 4800 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479941 4800 feature_gate.go:330] unrecognized feature gate: Example Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479955 4800 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479959 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479962 4800 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479966 4800 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479970 4800 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479974 4800 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479978 4800 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479981 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479985 4800 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479988 4800 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479992 4800 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479996 4800 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.479999 4800 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480003 4800 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480006 4800 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480010 4800 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480014 4800 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480018 4800 
feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480023 4800 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480026 4800 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480031 4800 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480037 4800 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480041 4800 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480046 4800 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480051 4800 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480055 4800 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480059 4800 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480063 4800 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480067 4800 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480073 4800 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480077 4800 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480081 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480086 4800 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480091 4800 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480094 4800 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480098 4800 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480102 4800 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480106 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480110 4800 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480114 4800 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480119 4800 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480123 4800 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480127 4800 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480130 4800 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480144 4800 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480147 4800 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480151 4800 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480156 4800 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480160 4800 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480164 4800 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480168 4800 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480172 4800 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480176 4800 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480180 4800 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480186 4800 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480190 4800 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480194 4800 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480198 4800 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480201 4800 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480205 4800 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480208 4800 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480212 4800 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.480215 4800 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.480221 4800 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.481188 4800 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.485423 4800 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.485517 4800 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.487019 4800 server.go:997] "Starting client certificate rotation"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.487050 4800 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.487267 4800 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-03 10:29:02.839391531 +0000 UTC
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.487380 4800 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 931h11m43.352013868s for next certificate rotation
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.552432 4800 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.558145 4800 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.586049 4800 log.go:25] "Validated CRI v1 runtime API"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.664142 4800 log.go:25] "Validated CRI v1 image API"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.666870 4800 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.675479 4800 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-15-12-16-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.675529 4800 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.694607 4800 manager.go:217] Machine: {Timestamp:2025-11-25 15:17:19.691380574 +0000 UTC m=+0.745789436 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:f3b20845-f0f6-45af-84f7-77c49c8161e9 BootID:47449ece-b8c7-4e5f-9f20-f4807c2b7cf6 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs
Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:4c:4c:8b Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:4c:4c:8b Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:de:16:7c Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:65:ce:84 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:f1:64:46 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:27:ec:45 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:79:e1:25 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:82:7b:17:6d:55:53 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:82:59:ad:55:cb:41 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 
Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.694942 4800 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.695169 4800 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.699346 4800 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.699845 4800 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.699961 4800 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.700384 4800 
topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.700406 4800 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.701391 4800 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.701456 4800 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.701937 4800 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.702118 4800 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.706829 4800 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.706927 4800 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.706987 4800 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.707020 4800 kubelet.go:324] "Adding apiserver pod source"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.707040 4800 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.715767 4800 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.717153 4800 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
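A note on the HardEvictionThresholds in the Node Config entry above: each signal is either an absolute quantity (memory.available < 100Mi) or a percentage of the resource's capacity (nodefs.available < 10%, imagefs.available < 15%, the inodesFree signals < 5%). The Go sketch below shows how such a threshold can be checked against the capacities this machine actually reported (33654124544 bytes of RAM, 85292941312 bytes on /var); the `threshold` type and `crossed` helper are hypothetical, not kubelet code.

```go
// Hypothetical sketch of a hard-eviction check; the threshold values
// and capacities mirror the Node Config and Machine entries in the log.
package main

import "fmt"

type threshold struct {
	signal   string
	quantity int64   // absolute limit in bytes (0 if percentage-based)
	percent  float64 // fraction of capacity (0 if quantity-based)
}

// crossed reports whether "available" has fallen below the threshold
// for a resource with the given total capacity.
func crossed(t threshold, available, capacity int64) bool {
	limit := t.quantity
	if t.percent > 0 {
		limit = int64(t.percent * float64(capacity))
	}
	return available < limit
}

func main() {
	mem := threshold{signal: "memory.available", quantity: 100 << 20} // 100Mi
	nodefs := threshold{signal: "nodefs.available", percent: 0.10}    // 10%

	fmt.Println(crossed(mem, 80<<20, 33654124544))    // true: only 80Mi left
	fmt.Println(crossed(nodefs, 20<<30, 85292941312)) // false: ~25% of /var free
}
```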
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.720978 4800 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.722209 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.722249 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.722346 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.722350 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723673 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723698 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723705 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723713 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723725 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723733 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723740 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723751 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723760 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723769 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723779 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.723786 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.725232 4800 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.725689 4800 server.go:1280] "Started kubelet"
Nov 25 15:17:19 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.727769 4800 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.728081 4800 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.728401 4800 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.728627 4800 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.729432 4800 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.729477 4800 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.729761 4800 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.729804 4800 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.729942 4800 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.729569 4800 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 15:32:22.839308608 +0000 UTC
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.730349 4800 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 792h15m3.108964739s for next certificate rotation
Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.730505 4800 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.736179 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.740270 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.740490 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="200ms"
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.740732 4800 factory.go:55] Registering systemd factory
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.740756 4800 factory.go:221] Registration of the systemd container factory successfully
Nov 25 15:17:19 crc kubenswrapper[4800]: I1125
15:17:19.741509 4800 factory.go:153] Registering CRI-O factory Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.741545 4800 factory.go:221] Registration of the crio container factory successfully Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.741634 4800 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.741665 4800 factory.go:103] Registering Raw factory Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.741685 4800 manager.go:1196] Started watching for new ooms in manager Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.742453 4800 manager.go:319] Starting recovery of all containers Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.742667 4800 server.go:460] "Adding debug handlers to kubelet server" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.750951 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751050 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751070 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751092 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751109 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751123 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751139 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751158 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 
15:17:19.751179 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751196 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751215 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751233 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751249 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751271 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751288 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751303 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751357 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751374 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751390 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 
15:17:19.751406 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751451 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751468 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751483 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751498 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751514 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751533 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.748201 4800 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b48e7dcdaf1a9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 15:17:19.725662633 +0000 UTC m=+0.780071115,LastTimestamp:2025-11-25 15:17:19.725662633 +0000 UTC m=+0.780071115,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751552 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751613 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751652 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751668 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751684 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751701 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751716 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751736 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751752 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751791 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751808 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751825 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751860 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751876 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751890 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751905 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751918 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751934 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751951 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751966 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751976 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.751989 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752002 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752015 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752031 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752049 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752070 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752087 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752102 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752119 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752133 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752149 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752163 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752176 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752194 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752208 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752275 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752315 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752333 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752348 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752361 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752381 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752395 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752410 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752428 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752444 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752460 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752473 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752486 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752501 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752522 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752538 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752553 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752567 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752583 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752596 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752612 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752627 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752641 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752655 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752671 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752684 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752696 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752713 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752728 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752742 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752758 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752771 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752784 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752801 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752816 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752832 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752868 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752888 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752902 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752919 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752933 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752949 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752971 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.752987 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753001 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753016 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753033 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753048 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753064 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753080 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753097 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753112 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753125 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753140 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753153 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753168 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753182 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753195 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753209 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753224 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753240 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753254 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753270 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753286 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753300 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" 
volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753314 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753327 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753341 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753354 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753369 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753382 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753399 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753412 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753425 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753436 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753447 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753459 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753472 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753487 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753500 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753514 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753528 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753543 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753559 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753573 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753585 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753598 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753612 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753625 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753638 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753652 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753666 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753683 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753699 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753717 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753731 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753744 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753758 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753771 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753783 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753795 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753808 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753823 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753843 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753953 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753967 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.753988 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.754004 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.754017 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.754083 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.754101 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.754117 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756740 4800 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756777 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756799 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756827 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756861 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756879 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756894 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756909 4800 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756923 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756938 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756952 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756966 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756982 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.756997 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757014 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757027 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757056 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757069 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757083 4800 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757096 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757108 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757123 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757136 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757149 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757161 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757175 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757188 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757202 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757218 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757235 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757268 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757283 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757297 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757311 4800 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757326 4800 reconstruct.go:97] "Volume reconstruction finished" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.757337 4800 reconciler.go:26] "Reconciler: start to sync state" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.762486 4800 manager.go:324] Recovery completed Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.775139 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.776543 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.776590 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.776603 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.777402 4800 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.777417 4800 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.777434 4800 state_mem.go:36] "Initialized new in-memory state store" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.782290 4800 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.784011 4800 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.784047 4800 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.784073 4800 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.784152 4800 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 15:17:19 crc kubenswrapper[4800]: W1125 15:17:19.784958 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.785022 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError" Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.831079 4800 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.858987 4800 policy_none.go:49] "None policy: Start" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.860180 4800 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.860217 4800 state_mem.go:35] "Initializing new in-memory state store" Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.885153 4800 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.907151 4800 manager.go:334] "Starting Device Plugin manager" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.907322 4800 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.907337 4800 server.go:79] "Starting device plugin registration server" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.907783 4800 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.907803 4800 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.908052 4800 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.908128 4800 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 15:17:19 crc kubenswrapper[4800]: I1125 15:17:19.908135 4800 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.918327 4800 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 15:17:19 crc kubenswrapper[4800]: E1125 15:17:19.941689 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="400ms" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.008900 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.010602 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.010656 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.010672 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.010701 4800 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 15:17:20 crc kubenswrapper[4800]: E1125 15:17:20.011195 4800 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.086102 4800 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.086300 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.087971 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.088028 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.088042 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.088254 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.088618 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.088673 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089440 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089489 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089502 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089675 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089712 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089749 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089761 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089837 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.089894 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090543 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090581 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090594 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090647 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090667 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090676 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090703 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090898 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.090935 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.091535 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.091749 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.091803 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.092113 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.092247 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.092293 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094170 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094206 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094219 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094224 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094253 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094267 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094233 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094388 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094399 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094445 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.094474 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.095229 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.095257 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.095268 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161125 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161188 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161231 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161260 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161289 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161319 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161347 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161374 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161405 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161437 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161466 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161535 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161566 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161594 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.161619 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.212199 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.214053 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.214109 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:20 crc 
kubenswrapper[4800]: I1125 15:17:20.214133 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.214179 4800 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: E1125 15:17:20.214956 4800 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.262583 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263001 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.262870 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263068 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263026 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263089 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263133 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263157 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263189 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263212 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263230 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263237 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263246 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263269 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263213 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263294 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263284 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263307 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263189 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263304 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263352 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263391 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263408 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263448 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263448 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263471 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263512 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263544 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263561 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.263616 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: E1125 15:17:20.343198 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="800ms"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.428697 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.437909 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.459171 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.474709 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.482189 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: W1125 15:17:20.497338 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-8437c93444b547e47f5ef487ad2242ce0c74d20bb7bf01c0a44cdadef484d9dc WatchSource:0}: Error finding container 8437c93444b547e47f5ef487ad2242ce0c74d20bb7bf01c0a44cdadef484d9dc: Status 404 returned error can't find the container with id 8437c93444b547e47f5ef487ad2242ce0c74d20bb7bf01c0a44cdadef484d9dc
Nov 25 15:17:20 crc kubenswrapper[4800]: W1125 15:17:20.498626 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-55aab4ea6a321c3e01bf01ebe7090bcdd041139c76498f72d556dfc3093efc62 WatchSource:0}: Error finding container 55aab4ea6a321c3e01bf01ebe7090bcdd041139c76498f72d556dfc3093efc62: Status 404 returned error can't find the container with id 55aab4ea6a321c3e01bf01ebe7090bcdd041139c76498f72d556dfc3093efc62
Nov 25 15:17:20 crc kubenswrapper[4800]: W1125 15:17:20.501362 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-e27c48acde874d8ee6bedc6af4ac3f18ec4193d970e97106f28797457edf0544 WatchSource:0}: Error finding container e27c48acde874d8ee6bedc6af4ac3f18ec4193d970e97106f28797457edf0544: Status 404 returned error can't find the container with id e27c48acde874d8ee6bedc6af4ac3f18ec4193d970e97106f28797457edf0544
Nov 25 15:17:20 crc kubenswrapper[4800]: W1125 15:17:20.502292 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-a5f51a86d3819c7f19f15c9104ea257030214bf4c29ae409085f35907638d1a3 WatchSource:0}: Error finding container a5f51a86d3819c7f19f15c9104ea257030214bf4c29ae409085f35907638d1a3: Status 404 returned error can't find the container with id a5f51a86d3819c7f19f15c9104ea257030214bf4c29ae409085f35907638d1a3
Nov 25 15:17:20 crc kubenswrapper[4800]: W1125 15:17:20.504030 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-cddade779a5d66013172c08b0721472b7b27f93d4228389fe5cadfb4185c6b97 WatchSource:0}: Error finding container cddade779a5d66013172c08b0721472b7b27f93d4228389fe5cadfb4185c6b97: Status 404 returned error can't find the container with id cddade779a5d66013172c08b0721472b7b27f93d4228389fe5cadfb4185c6b97
Nov 25 15:17:20 crc kubenswrapper[4800]: W1125 15:17:20.545974 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:20 crc kubenswrapper[4800]: E1125 15:17:20.546104 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.615898 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.617980 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.618020 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.618031 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.618058 4800 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: E1125 15:17:20.618730 4800 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc"
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.730354 4800 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.789189 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8437c93444b547e47f5ef487ad2242ce0c74d20bb7bf01c0a44cdadef484d9dc"}
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.790049 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"55aab4ea6a321c3e01bf01ebe7090bcdd041139c76498f72d556dfc3093efc62"}
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.791232 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e27c48acde874d8ee6bedc6af4ac3f18ec4193d970e97106f28797457edf0544"}
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.797009 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"cddade779a5d66013172c08b0721472b7b27f93d4228389fe5cadfb4185c6b97"}
Nov 25 15:17:20 crc kubenswrapper[4800]: I1125 15:17:20.798684 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a5f51a86d3819c7f19f15c9104ea257030214bf4c29ae409085f35907638d1a3"}
Nov 25 15:17:20 crc kubenswrapper[4800]: W1125 15:17:20.839867 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:20 crc kubenswrapper[4800]: E1125 15:17:20.839980 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:21 crc kubenswrapper[4800]: W1125 15:17:21.096588 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:21 crc kubenswrapper[4800]: E1125 15:17:21.096710 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:21 crc kubenswrapper[4800]: E1125 15:17:21.144235 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="1.6s"
Nov 25 15:17:21 crc kubenswrapper[4800]: W1125 15:17:21.152735 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:21 crc kubenswrapper[4800]: E1125 15:17:21.152816 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.419317 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.420642 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.420698 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.420711 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.420743 4800 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 15:17:21 crc kubenswrapper[4800]: E1125 15:17:21.421242 4800 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.730160 4800 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.803152 4800 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703" exitCode=0
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.803234 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703"}
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.803293 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.804978 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.805033 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.805046 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.806375 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01"}
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.806439 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799"}
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.807801 4800 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="1be08a0f1b93b25a2a42af57b1453303ae5a0473598ea4b58fa01a5eaf632826" exitCode=0
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.807895 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"1be08a0f1b93b25a2a42af57b1453303ae5a0473598ea4b58fa01a5eaf632826"}
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.807921 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.808737 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.808778 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.808790 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.810268 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475" exitCode=0
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.810338 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475"}
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.810370 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.811650 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.811720 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.811740 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.812119 4800 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3c0640ad822968f306f" exitCode=0
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.812150 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3c0640ad822968f306f"}
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.812243 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.813130 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.813158 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.813169 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.815339 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.816429 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.816464 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:21 crc kubenswrapper[4800]: I1125 15:17:21.816476 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.730244 4800 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:22 crc kubenswrapper[4800]: E1125 15:17:22.745387 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="3.2s"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.815715 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"470628f891898766fdab61961dd1b441cb35ae97f941cf532d8dbcdbd725a25c"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.815763 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.816874 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.816910 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.816920 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.818491 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.818474 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.818607 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.818629 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.819351 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.819395 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.819411 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.821099 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.821148 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.821158 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.823192 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.823227 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.823239 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.823821 4800 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a94819c23435d4f79357fa9194be013b4cc560e4aedf03c9f06a587dc56ca338" exitCode=0
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.823869 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a94819c23435d4f79357fa9194be013b4cc560e4aedf03c9f06a587dc56ca338"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.823980 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.824802 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.824879 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.824896 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.827015 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.827048 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.827060 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0"}
Nov 25 15:17:22 crc kubenswrapper[4800]: I1125 15:17:22.827072 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9"}
Nov 25 15:17:22 crc kubenswrapper[4800]: W1125 15:17:22.945992 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:22 crc kubenswrapper[4800]: E1125 15:17:22.946090 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.021378 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.022673 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.022696 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.022719 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.022740 4800 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 15:17:23 crc kubenswrapper[4800]: E1125 15:17:23.023232 4800 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.145:6443: connect: connection refused" node="crc"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.391557 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:23 crc kubenswrapper[4800]: W1125 15:17:23.597530 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:23 crc kubenswrapper[4800]: E1125 15:17:23.597664 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:23 crc kubenswrapper[4800]: W1125 15:17:23.674451 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:23 crc kubenswrapper[4800]: E1125 15:17:23.674586 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.730407 4800 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.731300 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.832023 4800 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c0b571c340ae74018edda8c33fc2dbfb6a7af5528a9d51d7e5fd0c7cfb9dbaa3" exitCode=0
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.832131 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c0b571c340ae74018edda8c33fc2dbfb6a7af5528a9d51d7e5fd0c7cfb9dbaa3"}
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.832204 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.836227 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.836296 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.836313 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.840606 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"508d2e605b013da47fa3df523139bb35037b5c6406b80f35c942d214e8ee473b"}
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.840665 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.840769 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.840855 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.840768 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842214 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842266 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842277 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842370 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842404 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842335 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842417 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842553 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842584 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842339 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842755 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:23 crc kubenswrapper[4800]: I1125 15:17:23.842768 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:23 crc kubenswrapper[4800]: W1125 15:17:23.940965 4800 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.145:6443: connect: connection refused
Nov 25 15:17:23 crc kubenswrapper[4800]: E1125 15:17:23.941042 4800 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.145:6443: connect: connection refused" logger="UnhandledError"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846381 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"25613c5c86c4ef42866c17b5dc3c351a318f1d5b106d44803357ca3edf32967c"}
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846440 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9d8dc939b4c0dc3d8bb129e6c347d14ff2952a480485572c64b5091a2a4016c8"}
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846451 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846512 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846531 4800 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846573 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846458 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ee430e100f2a618a75006b35e1bb4c7b5dbf028e6f3fe6c0e60de38b9f028723"}
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.846620 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"818cde86f18214331508b7d620339e393770b487e7d8db2d8549953811cacbe7"}
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847383 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847417 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847429 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847461 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847478 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847490 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847917 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847949 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:24 crc kubenswrapper[4800]: I1125 15:17:24.847958 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:25 crc kubenswrapper[4800]: I1125 15:17:25.852291 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"515887f89df1e7842059815621604d81a2c5ae4843d6fa44a57d306a2296cc25"}
Nov 25 15:17:25 crc kubenswrapper[4800]: I1125 15:17:25.852398 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:25 crc kubenswrapper[4800]: I1125 15:17:25.853163 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:25 crc kubenswrapper[4800]: I1125 15:17:25.853203 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:25 crc kubenswrapper[4800]: I1125 15:17:25.853212 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.080132 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.131894 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.132074 4800 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.132117 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.133206 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.133265 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.133277 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.224355 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.226026 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.226079 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.226089 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.226118 4800 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.560343 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.560604 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.562282 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.562334 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.562344 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.854347 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.856196 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.856229 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:26 crc kubenswrapper[4800]: I1125 15:17:26.856240 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.145123 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.145325 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.146690 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.146747 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.146760 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.755627 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.856290 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.858657 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.858740 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.858760 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.859883 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.860065 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.861272 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.861424 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:27 crc kubenswrapper[4800]: I1125 15:17:27.861551 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.665922 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.666585 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.665953 4800 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.667485 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.668080 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.668161 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.668203 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.668221 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.669236 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.669276 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.669285 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:29 crc kubenswrapper[4800]: I1125 15:17:29.672912 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:29 crc kubenswrapper[4800]: E1125 15:17:29.919247 4800 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 15:17:30 crc kubenswrapper[4800]: I1125 15:17:30.095215 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:30 crc kubenswrapper[4800]: I1125 15:17:30.670513 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:30 crc kubenswrapper[4800]: I1125 15:17:30.674785 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:30 crc kubenswrapper[4800]: I1125 15:17:30.674834 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:30 crc kubenswrapper[4800]: I1125 15:17:30.674871 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:31 crc kubenswrapper[4800]: I1125 15:17:31.673314 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:31 crc kubenswrapper[4800]: I1125 15:17:31.674417 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:31 crc kubenswrapper[4800]: I1125 15:17:31.674466 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:31 crc kubenswrapper[4800]: I1125 15:17:31.674481 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:31 crc kubenswrapper[4800]: I1125 15:17:31.678007 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 15:17:32 crc kubenswrapper[4800]: I1125 15:17:32.674982 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:32 crc kubenswrapper[4800]: I1125 15:17:32.676066 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:32 crc kubenswrapper[4800]: I1125 15:17:32.676139 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:32 crc kubenswrapper[4800]: I1125 15:17:32.676163 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:34 crc kubenswrapper[4800]: E1125 15:17:34.501384 4800 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.187b48e7dcdaf1a9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 15:17:19.725662633 +0000 UTC m=+0.780071115,LastTimestamp:2025-11-25 15:17:19.725662633 +0000 UTC m=+0.780071115,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.682486 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.684290 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="508d2e605b013da47fa3df523139bb35037b5c6406b80f35c942d214e8ee473b" exitCode=255
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.684346 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"508d2e605b013da47fa3df523139bb35037b5c6406b80f35c942d214e8ee473b"}
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.684600 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.685620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.685681 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.685693 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.686414 4800 scope.go:117] "RemoveContainer" containerID="508d2e605b013da47fa3df523139bb35037b5c6406b80f35c942d214e8ee473b"
Nov 25 15:17:34 crc kubenswrapper[4800]: I1125 15:17:34.730174 4800 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.291666 4800 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.291764 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.296592 4800 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.296711 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.689662 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.691106 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff"}
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.691270 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.692177 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.692209 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:35 crc kubenswrapper[4800]: I1125 15:17:35.692219 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.145808 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.146116 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.147753 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.147807 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.147817 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.794071 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.794356 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.795902 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.795954 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.795968 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.810147 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.867984 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.868206 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.869536 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:37 crc
kubenswrapper[4800]: I1125 15:17:37.869595 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.869615 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:37 crc kubenswrapper[4800]: I1125 15:17:37.874397 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.698347 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.698426 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.699393 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.699430 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.699447 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.699674 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.699697 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:38 crc kubenswrapper[4800]: I1125 15:17:38.699709 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:39 crc kubenswrapper[4800]: I1125 15:17:39.561685 4800 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 15:17:39 crc kubenswrapper[4800]: I1125 15:17:39.561794 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 15:17:39 crc kubenswrapper[4800]: E1125 15:17:39.919379 4800 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.281188 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.286560 4800 trace.go:236] Trace[991804815]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 15:17:27.218) (total time: 13067ms): Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[991804815]: ---"Objects listed" error: 13067ms 
(15:17:40.286) Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[991804815]: [13.067588478s] [13.067588478s] END Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.286593 4800 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.288414 4800 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.290025 4800 trace.go:236] Trace[341530770]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 15:17:29.666) (total time: 10623ms): Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[341530770]: ---"Objects listed" error: 10623ms (15:17:40.289) Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[341530770]: [10.623965859s] [10.623965859s] END Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.290051 4800 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.290092 4800 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.290477 4800 trace.go:236] Trace[726597306]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 15:17:27.116) (total time: 13173ms): Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[726597306]: ---"Objects listed" error: 13173ms (15:17:40.290) Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[726597306]: [13.173715605s] [13.173715605s] END Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.290537 4800 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.296040 4800 trace.go:236] Trace[639648696]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 15:17:29.665) (total time: 10630ms): Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[639648696]: ---"Objects listed" error: 10630ms (15:17:40.295) Nov 25 15:17:40 crc kubenswrapper[4800]: Trace[639648696]: [10.630585331s] [10.630585331s] END Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.296066 4800 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.671490 4800 apiserver.go:52] "Watching apiserver" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.723146 4800 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.723481 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.724078 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.724168 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.724237 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.724517 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.724542 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.724584 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.724645 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.724763 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.724884 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.727638 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.727982 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.728097 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.728244 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.729775 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.733067 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.733511 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.733958 4800 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.734518 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.740513 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793238 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793304 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793329 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793351 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793371 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793391 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793409 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793427 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793466 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793486 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793508 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793525 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793543 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793563 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793578 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793595 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793615 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793636 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793654 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793671 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793690 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793709 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793690 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793727 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793821 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793866 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793889 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793909 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793928 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793946 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793964 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.793988 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794006 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod 
\"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794029 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794051 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794054 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794074 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794109 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794127 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794145 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794163 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794179 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794196 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794220 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794238 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794260 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794278 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794294 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794313 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794332 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794352 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794375 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794399 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794422 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794442 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794460 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794483 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794502 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794552 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794573 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794600 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794618 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794636 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794654 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794676 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794698 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794746 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794780 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794796 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794814 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794833 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794875 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794892 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794910 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794929 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794955 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794976 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794997 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795020 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795040 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795058 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795074 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795091 4800 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795108 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795124 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795143 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795164 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795182 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795201 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795219 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795241 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795260 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795276 4800 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795292 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795310 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795327 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795346 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795361 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795382 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795401 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795419 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795436 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795455 
4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795472 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795488 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795506 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795521 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795542 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795562 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795579 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795597 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795614 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 
25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795629 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795646 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795667 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795689 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795706 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795722 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795739 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795756 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795775 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795792 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod 
\"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795812 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795831 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795887 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795908 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795928 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795945 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795965 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795984 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796002 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796020 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796040 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796058 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796075 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796090 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796109 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796127 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796148 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796179 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796213 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796276 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796304 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796328 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796351 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796374 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796401 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796446 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796474 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796501 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796529 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796552 4800 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796580 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796603 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796625 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796658 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796686 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796711 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796741 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796767 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796793 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 
25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796816 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796862 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796892 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796920 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796947 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798862 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798894 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798925 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798948 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798968 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799044 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799078 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799100 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799127 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799151 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799173 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799213 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799273 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799296 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799318 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799341 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799364 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799712 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799740 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799761 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799823 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799944 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799968 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799992 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800015 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800036 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800059 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800081 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800102 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800124 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800149 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800212 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800261 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800294 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800320 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800345 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800394 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800469 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800495 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800522 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800547 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800570 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800594 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800618 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800643 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.800739 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.811515 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794312 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794305 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794472 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794497 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794630 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794789 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794927 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.794958 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795124 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795137 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795291 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795374 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795517 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795638 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795887 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795912 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.795957 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796080 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796365 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796391 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796453 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796572 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796599 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796751 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.796941 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797093 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797294 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797320 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797444 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797542 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797551 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797605 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797717 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.797772 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798015 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798074 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.818083 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798346 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798648 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799344 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799476 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.799570 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.801815 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.802655 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.803956 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.804488 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.804718 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.804836 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.805122 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.805187 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.805405 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.805677 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.805805 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.805907 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.806049 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.806178 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.806360 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.806572 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.806737 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.807393 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.807601 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.807607 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.807812 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.807916 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.808052 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.808283 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.808383 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.808610 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.808701 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809074 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809080 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809318 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809326 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809502 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809649 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809721 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809959 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.809978 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.810541 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.811211 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.811254 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.811647 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.811989 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.812379 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.812448 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.812691 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.813618 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.813657 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.813912 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814084 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814202 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814520 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814525 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814583 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814654 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). 
InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814917 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814949 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.814901 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815022 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815043 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815185 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815239 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815260 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815307 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815358 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815401 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.815421 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.816773 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.817060 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.816925 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.817129 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.817375 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.817409 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.817611 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.817745 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.798306 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.819204 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.819362 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.824802 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.827152 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.827693 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.828201 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.828204 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.828615 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.828628 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.828989 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.829020 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.829002 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.829262 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.829450 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.829474 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.829736 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.829833 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.829996 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:17:41.32996692 +0000 UTC m=+22.384375592 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830120 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830206 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830374 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830448 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830574 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830782 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830947 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.831087 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.830741 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.831858 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.832036 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.832183 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.832220 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.832532 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.832867 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.832907 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.832963 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.833070 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.833296 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). 
InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.833299 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.833506 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.833776 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.833871 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.834221 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.834239 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.834383 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.834511 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.834537 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.834599 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:41.33457624 +0000 UTC m=+22.388984902 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.834617 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.834685 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:41.334664933 +0000 UTC m=+22.389073415 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.835309 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.836349 4800 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.836521 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.838194 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.838413 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.838466 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.843101 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.843450 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.844401 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.847384 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.848354 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.852441 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.854496 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.854539 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.854557 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.854651 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:41.354624261 +0000 UTC m=+22.409032813 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.856325 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.860038 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.861724 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.861956 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.863350 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.863612 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.864275 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.864897 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.865313 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.865787 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.865820 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.865839 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.869409 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.869551 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.869581 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.869563 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.869595 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:40 crc kubenswrapper[4800]: E1125 15:17:40.869675 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.869738 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.870735 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.871022 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.869336 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.875132 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.875398 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.875497 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.875987 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.876141 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.876934 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.877195 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.877763 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.877971 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.887535 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.890256 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.892956 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.900878 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901426 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901508 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901581 4800 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901596 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901607 4800 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901617 4800 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901628 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901638 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901648 4800 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901660 4800 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901672 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901683 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 
25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901694 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901703 4800 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901712 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901722 4800 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901731 4800 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901721 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901740 4800 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901799 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901811 4800 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901823 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901834 4800 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901861 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901871 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on 
node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901881 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901891 4800 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901882 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901901 4800 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.901996 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902012 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902028 4800 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902062 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902076 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902090 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902102 4800 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902115 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902129 4800 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902141 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902154 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902167 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902179 4800 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902192 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902204 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902215 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902226 4800 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902238 4800 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902251 4800 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902262 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902274 4800 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902286 4800 reconciler_common.go:293] "Volume detached for volume 
\"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902298 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902335 4800 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902346 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902358 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902384 4800 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902403 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902412 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902440 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902453 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902466 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902478 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902489 4800 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902498 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" 
(UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902506 4800 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902516 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902525 4800 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902534 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902544 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902553 4800 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902562 4800 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902571 4800 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902582 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902591 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902601 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902611 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902621 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" 
DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902631 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902640 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902651 4800 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902661 4800 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902671 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902680 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902689 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902698 4800 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902709 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902718 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902728 4800 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902738 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902761 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" 
DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902771 4800 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902780 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902789 4800 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902799 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902808 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902817 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902825 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902834 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902863 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902872 4800 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902881 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902891 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902900 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902920 4800 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902931 4800 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902940 4800 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902951 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902961 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902971 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902980 4800 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902989 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.902999 4800 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903009 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903020 4800 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903029 4800 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903038 4800 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903047 4800 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903056 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903065 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903078 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903087 4800 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903097 4800 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903107 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903116 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903126 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903134 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903144 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903152 4800 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903162 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903171 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903180 4800 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903189 4800 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903198 4800 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903207 4800 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903217 4800 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903226 4800 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903235 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903243 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903253 4800 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903262 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903271 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: 
\"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903281 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903291 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903300 4800 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903309 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903318 4800 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903328 4800 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903338 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903359 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903369 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903379 4800 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903391 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903401 4800 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903410 4800 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903420 4800 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903429 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903441 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903451 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903461 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903470 4800 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903480 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903490 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903500 4800 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903509 4800 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903518 4800 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903527 4800 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903535 4800 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903545 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903554 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903563 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903572 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903581 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903591 4800 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903601 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903611 4800 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903621 4800 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903630 4800 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903638 4800 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903647 4800 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903656 4800 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903666 4800 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903675 4800 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903684 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903694 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903704 4800 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903714 4800 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903724 4800 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903733 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903743 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903752 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903761 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903769 4800 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903780 4800 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903789 4800 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.903798 4800 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.907435 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.908356 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.922320 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.934742 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.951444 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.962745 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.974014 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:40 crc kubenswrapper[4800]: I1125 15:17:40.984677 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.005392 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.047466 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.061260 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 15:17:41 crc kubenswrapper[4800]: W1125 15:17:41.066642 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-15f507fbb1ab96f2dad93e8586d8896a9cf5bce0dd35515474c81aea9a5f13d2 WatchSource:0}: Error finding container 15f507fbb1ab96f2dad93e8586d8896a9cf5bce0dd35515474c81aea9a5f13d2: Status 404 returned error can't find the container with id 15f507fbb1ab96f2dad93e8586d8896a9cf5bce0dd35515474c81aea9a5f13d2 Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.070058 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.408092 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.408177 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.408215 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.408243 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408265 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:17:42.408242126 +0000 UTC m=+23.462650618 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.408296 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408323 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408373 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:42.408360429 +0000 UTC m=+23.462768911 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408419 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408435 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408449 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408484 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:42.408475842 +0000 UTC m=+23.462884334 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408529 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408670 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:42.408638506 +0000 UTC m=+23.463047158 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408558 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408732 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408752 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.408800 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:42.40879168 +0000 UTC m=+23.463200362 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.680006 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-6tshx"] Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.680367 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.682873 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.683049 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.683103 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.699255 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.707314 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"846b0a3c0f559be9eb4adfda57bc243e54e85d9503e1b1482bb808015486d4f3"} Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.710215 4800 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec"} Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.710266 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"387fbf93a01a9c881f0d6d0f7e377e500ade33c311587d5001d02776e23c1b3a"} Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.712196 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c"} Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.712240 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"15f507fbb1ab96f2dad93e8586d8896a9cf5bce0dd35515474c81aea9a5f13d2"} Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.714041 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.714535 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.715877 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.716895 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff" exitCode=255 Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.716947 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff"} Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.717011 4800 scope.go:117] "RemoveContainer" containerID="508d2e605b013da47fa3df523139bb35037b5c6406b80f35c942d214e8ee473b" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.732553 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.747063 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.752680 4800 scope.go:117] "RemoveContainer" containerID="25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.752901 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.752929 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.760266 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.770914 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.780991 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.784297 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:41 crc kubenswrapper[4800]: E1125 15:17:41.784400 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.789009 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.789924 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.791609 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.791796 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.792710 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.795240 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.796083 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.796888 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.798268 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.799175 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.800460 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.801188 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.802664 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 15:17:41 crc 
kubenswrapper[4800]: I1125 15:17:41.803343 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.804763 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.805537 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.805658 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.807143 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.808122 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.809098 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.809795 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.810376 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.810712 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/33cfcf36-9b68-42c7-bc9c-261a04435b92-hosts-file\") pod \"node-resolver-6tshx\" (UID: \"33cfcf36-9b68-42c7-bc9c-261a04435b92\") " pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.810747 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m9hk\" (UniqueName: \"kubernetes.io/projected/33cfcf36-9b68-42c7-bc9c-261a04435b92-kube-api-access-7m9hk\") pod \"node-resolver-6tshx\" (UID: \"33cfcf36-9b68-42c7-bc9c-261a04435b92\") " pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.811766 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 
25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.812350 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.812762 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.814066 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.814469 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.815873 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 
15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.816926 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.817629 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.818786 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.819855 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.820301 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.821450 4800 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.821620 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.825269 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.825768 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.826200 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.827805 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.827730 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://508d2e605b013da47fa3df523139bb35037b5c6406b80f35c942d214e8ee473b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:34Z\\\",\\\"message\\\":\\\"W1125 15:17:23.361564 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 15:17:23.362247 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764083843 cert, and key in /tmp/serving-cert-3448515324/serving-signer.crt, /tmp/serving-cert-3448515324/serving-signer.key\\\\nI1125 15:17:24.102274 1 observer_polling.go:159] Starting file observer\\\\nW1125 15:17:24.104782 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 15:17:24.105074 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:24.106685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3448515324/tls.crt::/tmp/serving-cert-3448515324/tls.key\\\\\\\"\\\\nF1125 15:17:34.366405 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.828807 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.829322 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.830491 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.831548 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.832177 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.833703 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.834659 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.836316 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.837119 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.838430 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.839579 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.842449 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.843503 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.843712 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.844570 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.845207 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.847209 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.848109 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.848757 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.855223 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.866289 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.876633 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.911680 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/33cfcf36-9b68-42c7-bc9c-261a04435b92-hosts-file\") pod \"node-resolver-6tshx\" (UID: \"33cfcf36-9b68-42c7-bc9c-261a04435b92\") " pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.911741 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m9hk\" (UniqueName: \"kubernetes.io/projected/33cfcf36-9b68-42c7-bc9c-261a04435b92-kube-api-access-7m9hk\") pod \"node-resolver-6tshx\" (UID: \"33cfcf36-9b68-42c7-bc9c-261a04435b92\") " pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.911967 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/33cfcf36-9b68-42c7-bc9c-261a04435b92-hosts-file\") pod \"node-resolver-6tshx\" (UID: \"33cfcf36-9b68-42c7-bc9c-261a04435b92\") " pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.930802 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m9hk\" (UniqueName: \"kubernetes.io/projected/33cfcf36-9b68-42c7-bc9c-261a04435b92-kube-api-access-7m9hk\") pod \"node-resolver-6tshx\" (UID: \"33cfcf36-9b68-42c7-bc9c-261a04435b92\") " pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:41 crc kubenswrapper[4800]: I1125 15:17:41.995976 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6tshx" Nov 25 15:17:42 crc kubenswrapper[4800]: W1125 15:17:42.007688 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33cfcf36_9b68_42c7_bc9c_261a04435b92.slice/crio-a0f81010ef0706551bcb487dfb5d3d60340e29459ea12bb47ac6cf339dd57742 WatchSource:0}: Error finding container a0f81010ef0706551bcb487dfb5d3d60340e29459ea12bb47ac6cf339dd57742: Status 404 returned error can't find the container with id a0f81010ef0706551bcb487dfb5d3d60340e29459ea12bb47ac6cf339dd57742 Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.399127 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-6qf5g"] Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.399485 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.401564 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.401724 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.401878 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.402420 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.410477 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.418863 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.418936 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.418963 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.418987 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419015 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:17:44.418994431 +0000 UTC m=+25.473402913 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.419041 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419097 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419111 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419118 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419124 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419162 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:44.419153885 +0000 UTC m=+25.473562367 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419184 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419218 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419233 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419188 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:44.419171015 +0000 UTC m=+25.473579497 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419304 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:44.419294809 +0000 UTC m=+25.473703281 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419185 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.419343 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.419411 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-25 15:17:44.419388801 +0000 UTC m=+25.473797283 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.428566 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.441336 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://508d2e605b013da47fa3df523139bb35037b5c6406b80f35c942d214e8ee473b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:34Z\\\",\\\"message\\\":\\\"W1125 15:17:23.361564 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 15:17:23.362247 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764083843 cert, and key in /tmp/serving-cert-3448515324/serving-signer.crt, /tmp/serving-cert-3448515324/serving-signer.key\\\\nI1125 15:17:24.102274 1 observer_polling.go:159] Starting file observer\\\\nW1125 15:17:24.104782 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 15:17:24.105074 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:24.106685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3448515324/tls.crt::/tmp/serving-cert-3448515324/tls.key\\\\\\\"\\\\nF1125 15:17:34.366405 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating 
requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.452453 4800 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.462777 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.472164 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.482408 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.495385 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.519764 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62d7m\" (UniqueName: \"kubernetes.io/projected/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-kube-api-access-62d7m\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.519825 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-host\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.519884 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-serviceca\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.620883 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-host\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.620928 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-serviceca\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.620972 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62d7m\" (UniqueName: \"kubernetes.io/projected/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-kube-api-access-62d7m\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.621092 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-host\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.622159 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-serviceca\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.642887 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62d7m\" (UniqueName: \"kubernetes.io/projected/a6935887-df54-43b1-a2ad-0cfb3c9d65dd-kube-api-access-62d7m\") pod \"node-ca-6qf5g\" (UID: \"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\") " pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.712808 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-6qf5g" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.725060 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.728163 4800 scope.go:117] "RemoveContainer" containerID="25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff" Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.728370 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.736689 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae"} Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.738127 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6tshx" event={"ID":"33cfcf36-9b68-42c7-bc9c-261a04435b92","Type":"ContainerStarted","Data":"6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4"} Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.738193 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6tshx" event={"ID":"33cfcf36-9b68-42c7-bc9c-261a04435b92","Type":"ContainerStarted","Data":"a0f81010ef0706551bcb487dfb5d3d60340e29459ea12bb47ac6cf339dd57742"} Nov 25 15:17:42 crc kubenswrapper[4800]: W1125 15:17:42.745664 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6935887_df54_43b1_a2ad_0cfb3c9d65dd.slice/crio-d041a11fa6b686fb15f847655df27e78e57be217cdb6395b0172966c827425e2 WatchSource:0}: Error finding container d041a11fa6b686fb15f847655df27e78e57be217cdb6395b0172966c827425e2: Status 404 returned error can't find the container with id d041a11fa6b686fb15f847655df27e78e57be217cdb6395b0172966c827425e2 Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.749070 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.760436 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.771988 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.783388 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.784596 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.784866 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.784623 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:42 crc kubenswrapper[4800]: E1125 15:17:42.785030 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.798862 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.810993 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.822331 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.835866 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.852699 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.870734 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.887993 4800 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.905109 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.923271 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.936683 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.946089 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.953815 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.964889 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:42 crc kubenswrapper[4800]: I1125 15:17:42.978222 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.242739 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-hvg6z"]
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.243131 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-84zhh"]
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.243251 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.243694 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-84zhh"
Nov 25 15:17:43 crc kubenswrapper[4800]: W1125 15:17:43.247992 4800 reflector.go:561] object-"openshift-multus"/"default-cni-sysctl-allowlist": failed to list *v1.ConfigMap: configmaps "default-cni-sysctl-allowlist" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object
Nov 25 15:17:43 crc kubenswrapper[4800]: W1125 15:17:43.248004 4800 reflector.go:561] object-"openshift-multus"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object
Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.248050 4800 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"default-cni-sysctl-allowlist\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.248086 4800 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 15:17:43 crc kubenswrapper[4800]: W1125 15:17:43.249292 4800 reflector.go:561] object-"openshift-multus"/"cni-copy-resources": failed to list *v1.ConfigMap: configmaps "cni-copy-resources" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object
Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.249321 4800 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"cni-copy-resources\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"cni-copy-resources\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 15:17:43 crc kubenswrapper[4800]: W1125 15:17:43.249396 4800 reflector.go:561] object-"openshift-multus"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object
Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.249426 4800 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.255733 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-nzxgf"]
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.256152 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-nzxgf"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.259260 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.259399 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 25 15:17:43 crc kubenswrapper[4800]: W1125 15:17:43.259495 4800 reflector.go:561] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": failed to list *v1.Secret: secrets "machine-config-daemon-dockercfg-r5tcq" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object
Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.259528 4800 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-r5tcq\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-config-daemon-dockercfg-r5tcq\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.260212 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.261555 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 25 15:17:43 crc kubenswrapper[4800]: W1125 15:17:43.261557 4800 reflector.go:561] object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": failed to list *v1.Secret: secrets "multus-ancillary-tools-dockercfg-vnmsz" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object
Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.261699 4800 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"multus-ancillary-tools-dockercfg-vnmsz\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"multus-ancillary-tools-dockercfg-vnmsz\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.262240 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 25 15:17:43 crc kubenswrapper[4800]: W1125 15:17:43.266581 4800 reflector.go:561] object-"openshift-machine-config-operator"/"proxy-tls": failed to list *v1.Secret: secrets "proxy-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object
Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.266650 4800 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"proxy-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"proxy-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.288692 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.309330 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.325725 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5465\" (UniqueName: \"kubernetes.io/projected/9a80af7a-a7d6-4433-97da-7d5d015cd401-kube-api-access-w5465\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.325792 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.325816 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v95d\" (UniqueName: \"kubernetes.io/projected/69669849-59a1-47d8-9583-4ed964926242-kube-api-access-2v95d\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.325877 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9a80af7a-a7d6-4433-97da-7d5d015cd401-proxy-tls\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.325980 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-system-cni-dir\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.326050 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-cnibin\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.326074 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-binary-copy\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.326103 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9a80af7a-a7d6-4433-97da-7d5d015cd401-rootfs\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 
15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.326120 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-os-release\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.326146 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-tuning-conf-dir\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.326221 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9a80af7a-a7d6-4433-97da-7d5d015cd401-mcd-auth-proxy-config\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.347076 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.364679 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.379921 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.392746 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.408064 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.421312 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.426994 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427042 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-cnibin\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427061 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-socket-dir-parent\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427080 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-cnibin\") pod 
\"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427148 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-binary-copy\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427236 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-cnibin\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427342 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-cni-multus\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427367 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-os-release\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427398 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-conf-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427417 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-etc-kubernetes\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427444 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v95d\" (UniqueName: \"kubernetes.io/projected/69669849-59a1-47d8-9583-4ed964926242-kube-api-access-2v95d\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427465 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9a80af7a-a7d6-4433-97da-7d5d015cd401-proxy-tls\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427556 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-system-cni-dir\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427632 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9a80af7a-a7d6-4433-97da-7d5d015cd401-rootfs\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427674 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-os-release\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427694 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-system-cni-dir\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427744 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9a80af7a-a7d6-4433-97da-7d5d015cd401-rootfs\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427801 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-cni-bin\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.427935 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-multus-certs\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428010 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-tuning-conf-dir\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428018 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-os-release\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428050 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-system-cni-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428104 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-cni-binary-copy\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428145 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-k8s-cni-cncf-io\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428181 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-hostroot\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428233 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-kubelet\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428288 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-cni-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428326 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24wm9\" (UniqueName: \"kubernetes.io/projected/0321f61a-9e40-47a2-b19f-a859fd6b890a-kube-api-access-24wm9\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428372 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9a80af7a-a7d6-4433-97da-7d5d015cd401-mcd-auth-proxy-config\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428435 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5465\" (UniqueName: \"kubernetes.io/projected/9a80af7a-a7d6-4433-97da-7d5d015cd401-kube-api-access-w5465\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428543 4800 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-netns\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.428575 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-daemon-config\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.429673 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9a80af7a-a7d6-4433-97da-7d5d015cd401-mcd-auth-proxy-config\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.435675 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.436033 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/69669849-59a1-47d8-9583-4ed964926242-tuning-conf-dir\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.448934 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5465\" (UniqueName: \"kubernetes.io/projected/9a80af7a-a7d6-4433-97da-7d5d015cd401-kube-api-access-w5465\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.457829 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.472770 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.487789 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.502427 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.519732 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529363 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-cni-bin\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529413 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-multus-certs\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529436 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-system-cni-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529460 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-cni-binary-copy\") pod \"multus-nzxgf\" (UID: 
\"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529481 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-k8s-cni-cncf-io\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529501 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-kubelet\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529519 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-hostroot\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529546 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24wm9\" (UniqueName: \"kubernetes.io/projected/0321f61a-9e40-47a2-b19f-a859fd6b890a-kube-api-access-24wm9\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529559 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-k8s-cni-cncf-io\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529573 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-cni-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529661 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-hostroot\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529702 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-netns\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529680 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-netns\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529782 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" 
(UniqueName: \"kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-daemon-config\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529798 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-kubelet\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529853 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-cnibin\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529829 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-system-cni-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529879 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-socket-dir-parent\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529987 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-cni-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.529512 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-cni-bin\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530040 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-cni-multus\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530084 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-socket-dir-parent\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530182 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-var-lib-cni-multus\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc 
kubenswrapper[4800]: I1125 15:17:43.530211 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-cnibin\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530220 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-os-release\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530264 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-os-release\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530289 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-host-run-multus-certs\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530342 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-conf-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530364 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-etc-kubernetes\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530447 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-conf-dir\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530454 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0321f61a-9e40-47a2-b19f-a859fd6b890a-etc-kubernetes\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.530582 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-multus-daemon-config\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.537295 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.559216 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.572788 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.594189 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.610729 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.626774 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.640616 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvthw"] Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.641685 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.646339 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.646515 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.646617 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.647311 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.647350 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.647621 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.647643 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.647937 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.665258 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.677082 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.690142 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.701604 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.720611 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: 
I1125 15:17:43.732514 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-etc-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732564 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jbt5\" (UniqueName: \"kubernetes.io/projected/80e4f44d-4647-4e15-a29f-2672fc065d82-kube-api-access-4jbt5\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732607 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-bin\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732628 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-env-overrides\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732656 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-ovn-kubernetes\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732694 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-kubelet\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732709 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-netns\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732727 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-node-log\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732742 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-log-socket\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732760 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-slash\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732780 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-var-lib-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732832 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-netd\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732894 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-config\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732919 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-systemd\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732937 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732958 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-ovn\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.732995 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-script-lib\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.733063 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-systemd-units\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.733141 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80e4f44d-4647-4e15-a29f-2672fc065d82-ovn-node-metrics-cert\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.733230 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.738262 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.741934 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-6qf5g" 
event={"ID":"a6935887-df54-43b1-a2ad-0cfb3c9d65dd","Type":"ContainerStarted","Data":"bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b"} Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.741976 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-6qf5g" event={"ID":"a6935887-df54-43b1-a2ad-0cfb3c9d65dd","Type":"ContainerStarted","Data":"d041a11fa6b686fb15f847655df27e78e57be217cdb6395b0172966c827425e2"} Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.743920 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241"} Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.750694 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.765277 
4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.776760 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.785287 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:43 crc kubenswrapper[4800]: E1125 15:17:43.785693 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.793524 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f671
3d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.808639 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.822731 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.834916 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-script-lib\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.835355 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-systemd-units\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.835439 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-systemd-units\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.835560 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80e4f44d-4647-4e15-a29f-2672fc065d82-ovn-node-metrics-cert\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.835723 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.835906 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-etc-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836030 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jbt5\" (UniqueName: \"kubernetes.io/projected/80e4f44d-4647-4e15-a29f-2672fc065d82-kube-api-access-4jbt5\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836107 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-etc-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.835775 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.835959 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-script-lib\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836198 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836160 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-bin\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836305 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-env-overrides\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836353 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-ovn-kubernetes\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836412 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-ovn-kubernetes\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836454 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-kubelet\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836551 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-netns\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836570 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-node-log\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836592 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-log-socket\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836611 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-config\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836632 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-netns\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836636 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-slash\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836659 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-slash\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836692 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-node-log\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836690 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-var-lib-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836723 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-log-socket\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836732 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-netd\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836781 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-systemd\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836798 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836816 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-var-lib-openvswitch\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836831 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-ovn\") pod \"ovnkube-node-mvthw\" 
(UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836878 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-ovn\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836882 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-env-overrides\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.836918 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-systemd\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.837047 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.837058 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-kubelet\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.837100 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-bin\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.837081 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-netd\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.837441 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-config\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.840724 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80e4f44d-4647-4e15-a29f-2672fc065d82-ovn-node-metrics-cert\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc 
kubenswrapper[4800]: I1125 15:17:43.852015 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.852495 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jbt5\" (UniqueName: \"kubernetes.io/projected/80e4f44d-4647-4e15-a29f-2672fc065d82-kube-api-access-4jbt5\") pod \"ovnkube-node-mvthw\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.869157 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.880760 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.892675 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.910042 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: 
I1125 15:17:43.926479 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.939940 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.957042 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.959323 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\
"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.975613 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:43 crc kubenswrapper[4800]: I1125 15:17:43.993064 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:43Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.009428 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:44Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.026149 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:44Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.040924 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:44Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.053823 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:44Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.065166 4800 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:44Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:44 crc kubenswrapper[4800]: W1125 15:17:44.230555 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80e4f44d_4647_4e15_a29f_2672fc065d82.slice/crio-3f3331c454b12866d65a3ec569560dd1a858f3a483355ab3d0fc8919e228f493 WatchSource:0}: Error finding container 3f3331c454b12866d65a3ec569560dd1a858f3a483355ab3d0fc8919e228f493: Status 404 returned error can't find the container with id 3f3331c454b12866d65a3ec569560dd1a858f3a483355ab3d0fc8919e228f493 Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.427636 4800 configmap.go:193] Couldn't get configMap openshift-multus/cni-copy-resources: failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.427742 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-binary-copy 
podName:69669849-59a1-47d8-9583-4ed964926242 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:44.927719869 +0000 UTC m=+25.982128351 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cni-binary-copy" (UniqueName: "kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-binary-copy") pod "multus-additional-cni-plugins-84zhh" (UID: "69669849-59a1-47d8-9583-4ed964926242") : failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.427636 4800 configmap.go:193] Couldn't get configMap openshift-multus/default-cni-sysctl-allowlist: failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.427816 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-sysctl-allowlist podName:69669849-59a1-47d8-9583-4ed964926242 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:44.927798921 +0000 UTC m=+25.982207413 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cni-sysctl-allowlist" (UniqueName: "kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-sysctl-allowlist") pod "multus-additional-cni-plugins-84zhh" (UID: "69669849-59a1-47d8-9583-4ed964926242") : failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.428766 4800 secret.go:188] Couldn't get secret openshift-machine-config-operator/proxy-tls: failed to sync secret cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.428903 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a80af7a-a7d6-4433-97da-7d5d015cd401-proxy-tls podName:9a80af7a-a7d6-4433-97da-7d5d015cd401 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:44.928875278 +0000 UTC m=+25.983283790 (durationBeforeRetry 500ms). 
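The volume failures here all follow the same "No retries permitted until <t> (durationBeforeRetry <d>)" bookkeeping: each named mount operation carries a deadline before which it may not run again, and the delay grows on failure (500ms here, 4s for later attempts). A minimal Go sketch of that time-gated retry pattern; the constants and types are illustrative assumptions, not kubelet's actual nestedpendingoperations implementation.

package main

import (
	"fmt"
	"time"
)

// opGate models the per-operation deadline seen in the log: an operation may
// not be retried before nextAllowed, and the delay grows on each failure.
type opGate struct {
	nextAllowed time.Time
	delay       time.Duration
}

func (g *opGate) tryRun(now time.Time, run func() error) error {
	if now.Before(g.nextAllowed) {
		return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s)",
			g.nextAllowed.Format(time.RFC3339), g.delay)
	}
	if err := run(); err != nil {
		if g.delay == 0 {
			g.delay = 500 * time.Millisecond // first retry delay, as in the log
		} else {
			g.delay *= 2 // grows toward the longer 4s waits seen on later attempts
		}
		g.nextAllowed = now.Add(g.delay)
		return err
	}
	g.delay = 0 // success resets the gate
	return nil
}

func main() {
	var g opGate
	now := time.Now()
	_ = g.tryRun(now, func() error { return fmt.Errorf("failed to sync configmap cache") })
	// An immediate second attempt is refused until the deadline passes.
	fmt.Println(g.tryRun(now, func() error { return nil }))
}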
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/9a80af7a-a7d6-4433-97da-7d5d015cd401-proxy-tls") pod "machine-config-daemon-hvg6z" (UID: "9a80af7a-a7d6-4433-97da-7d5d015cd401") : failed to sync secret cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.441932 4800 projected.go:288] Couldn't get configMap openshift-multus/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.442138 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.442215 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.442269 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.442289 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442380 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:17:48.442348308 +0000 UTC m=+29.496756790 (durationBeforeRetry 4s). 
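Every status patch above fails with the same TLS error from the network-node-identity webhook: the serving certificate's NotAfter (2025-08-24T17:21:41Z) is before the current time. A minimal Go sketch of the validity-window check that produces this class of error, using a hypothetical PEM file path; crypto/x509 applies the same comparison during chain verification.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; the webhook's actual serving cert lives elsewhere.
	data, err := os.ReadFile("/tmp/webhook-serving.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	// The same window check the handshake applies; the log reports
	// "current time <now> is after <NotAfter>".
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate is not yet valid until %s\n",
			cert.NotBefore.Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}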
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442458 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442468 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442484 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442526 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:48.442513502 +0000 UTC m=+29.496921984 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.442522 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442570 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442590 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:48.442584484 +0000 UTC m=+29.496992966 (durationBeforeRetry 4s). 
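The UnmountVolume failure above is a registry miss: after the kubelet restart, the hostpath-provisioner CSI plugin has not yet re-registered, so the lookup by driver name fails. A minimal sketch of the lookup-by-name pattern behind that message; the registry type and endpoint field are illustrative, not kubelet's implementation.

package main

import (
	"fmt"
	"sync"
)

// csiRegistry maps a driver name to its endpoint, the shape of lookup implied
// by "driver name ... not found in the list of registered CSI drivers".
type csiRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // name -> unix socket endpoint (illustrative)
}

func (r *csiRegistry) get(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

func main() {
	reg := &csiRegistry{drivers: map[string]string{}}
	// The hostpath provisioner has not registered yet, so the lookup fails
	// and the TearDown is requeued, as in the log.
	if _, err := reg.get("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("TearDownAt:", err)
	}
}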
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442397 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442695 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442716 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442728 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442857 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:48.442744958 +0000 UTC m=+29.497153440 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.442897 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:48.442883422 +0000 UTC m=+29.497291904 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.470126 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.529951 4800 configmap.go:193] Couldn't get configMap openshift-multus/cni-copy-resources: failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.530044 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-cni-binary-copy podName:0321f61a-9e40-47a2-b19f-a859fd6b890a nodeName:}" failed. No retries permitted until 2025-11-25 15:17:45.030024895 +0000 UTC m=+26.084433377 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cni-binary-copy" (UniqueName: "kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-cni-binary-copy") pod "multus-nzxgf" (UID: "0321f61a-9e40-47a2-b19f-a859fd6b890a") : failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.545664 4800 projected.go:288] Couldn't get configMap openshift-multus/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.567370 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.572223 4800 projected.go:194] Error preparing data for projected volume kube-api-access-2v95d for pod openshift-multus/multus-additional-cni-plugins-84zhh: failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.572303 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/69669849-59a1-47d8-9583-4ed964926242-kube-api-access-2v95d podName:69669849-59a1-47d8-9583-4ed964926242 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:45.072282192 +0000 UTC m=+26.126690674 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-2v95d" (UniqueName: "kubernetes.io/projected/69669849-59a1-47d8-9583-4ed964926242-kube-api-access-2v95d") pod "multus-additional-cni-plugins-84zhh" (UID: "69669849-59a1-47d8-9583-4ed964926242") : failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.575989 4800 projected.go:194] Error preparing data for projected volume kube-api-access-24wm9 for pod openshift-multus/multus-nzxgf: failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.576055 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0321f61a-9e40-47a2-b19f-a859fd6b890a-kube-api-access-24wm9 podName:0321f61a-9e40-47a2-b19f-a859fd6b890a nodeName:}" failed. No retries permitted until 2025-11-25 15:17:45.07603125 +0000 UTC m=+26.130439732 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-24wm9" (UniqueName: "kubernetes.io/projected/0321f61a-9e40-47a2-b19f-a859fd6b890a-kube-api-access-24wm9") pod "multus-nzxgf" (UID: "0321f61a-9e40-47a2-b19f-a859fd6b890a") : failed to sync configmap cache: timed out waiting for the condition Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.643777 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.655457 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.674034 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.747120 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"3f3331c454b12866d65a3ec569560dd1a858f3a483355ab3d0fc8919e228f493"} Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.764547 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.784167 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.784350 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.784383 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.784551 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:44 crc kubenswrapper[4800]: E1125 15:17:44.784686 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.946165 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.946207 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-binary-copy\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.946248 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9a80af7a-a7d6-4433-97da-7d5d015cd401-proxy-tls\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.946880 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.947230 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/69669849-59a1-47d8-9583-4ed964926242-cni-binary-copy\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:44 crc kubenswrapper[4800]: I1125 15:17:44.949420 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9a80af7a-a7d6-4433-97da-7d5d015cd401-proxy-tls\") pod \"machine-config-daemon-hvg6z\" (UID: \"9a80af7a-a7d6-4433-97da-7d5d015cd401\") " pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.047279 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-cni-binary-copy\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.048054 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0321f61a-9e40-47a2-b19f-a859fd6b890a-cni-binary-copy\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.056394 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.148277 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v95d\" (UniqueName: \"kubernetes.io/projected/69669849-59a1-47d8-9583-4ed964926242-kube-api-access-2v95d\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.148333 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24wm9\" (UniqueName: \"kubernetes.io/projected/0321f61a-9e40-47a2-b19f-a859fd6b890a-kube-api-access-24wm9\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.152216 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24wm9\" (UniqueName: \"kubernetes.io/projected/0321f61a-9e40-47a2-b19f-a859fd6b890a-kube-api-access-24wm9\") pod \"multus-nzxgf\" (UID: \"0321f61a-9e40-47a2-b19f-a859fd6b890a\") " pod="openshift-multus/multus-nzxgf" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.153356 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v95d\" (UniqueName: \"kubernetes.io/projected/69669849-59a1-47d8-9583-4ed964926242-kube-api-access-2v95d\") pod \"multus-additional-cni-plugins-84zhh\" (UID: \"69669849-59a1-47d8-9583-4ed964926242\") " pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.366984 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-84zhh" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.373925 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-nzxgf" Nov 25 15:17:45 crc kubenswrapper[4800]: W1125 15:17:45.377826 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69669849_59a1_47d8_9583_4ed964926242.slice/crio-95745ae2291e8d579e7809e96df625f7b8a74b0f07c605b5db602160e9e37db7 WatchSource:0}: Error finding container 95745ae2291e8d579e7809e96df625f7b8a74b0f07c605b5db602160e9e37db7: Status 404 returned error can't find the container with id 95745ae2291e8d579e7809e96df625f7b8a74b0f07c605b5db602160e9e37db7 Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.751051 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"a727adac7d12b67256917761a348018956ba2b41121294347368aa013a891216"} Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.752927 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.754274 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-nzxgf" event={"ID":"0321f61a-9e40-47a2-b19f-a859fd6b890a","Type":"ContainerStarted","Data":"00c73893dffc740691217a713ff0f7627e73ccdeaa1093778825355a3ce27ff1"} Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.755175 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerStarted","Data":"95745ae2291e8d579e7809e96df625f7b8a74b0f07c605b5db602160e9e37db7"} Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.785428 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:45 crc kubenswrapper[4800]: E1125 15:17:45.785615 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
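The "back-off 10s restarting failed container" messages for kube-apiserver-check-endpoints (above, and again just below) reflect the kubelet's per-container crash-loop backoff: the delay starts at 10s and doubles per crash. A minimal sketch of that progression; the 5-minute cap is kubelet's documented default, assumed here rather than observed in this log.

package main

import (
	"fmt"
	"time"
)

// restartDelay returns the back-off before restarting a container that has
// crashed n times, following the 10s-doubling pattern in the log messages.
func restartDelay(crashes int) time.Duration {
	d := 10 * time.Second
	for i := 1; i < crashes; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute // assumed cap (kubelet default)
		}
	}
	return d
}

func main() {
	for n := 1; n <= 6; n++ {
		fmt.Printf("crash %d -> back-off %v\n", n, restartDelay(n))
	}
}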
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.804658 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:17:45 crc kubenswrapper[4800]: I1125 15:17:45.805491 4800 scope.go:117] "RemoveContainer" containerID="25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff" Nov 25 15:17:45 crc kubenswrapper[4800]: E1125 15:17:45.805726 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.565233 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.574021 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.577067 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.587995 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.609401 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.628160 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.647105 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.660739 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.681007 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.691108 4800 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.692979 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.693034 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.693050 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.693233 4800 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.703163 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.707217 4800 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.707556 4800 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.709106 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.709139 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.709147 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.709162 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.709171 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:46Z","lastTransitionTime":"2025-11-25T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no 
CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.718289 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 
2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.731741 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.735756 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.735800 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.736010 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.736029 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.736041 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:46Z","lastTransitionTime":"2025-11-25T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.740516 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.750938 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.755979 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.756254 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.756331 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.756400 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.756455 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:46Z","lastTransitionTime":"2025-11-25T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.758439 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.761643 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.763275 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42" exitCode=0 Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.764221 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.769744 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.770120 4800 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.774263 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.774392 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.774632 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.774654 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.774752 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.774921 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:46Z","lastTransitionTime":"2025-11-25T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.784555 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.784664 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.784899 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.785098 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.787138 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.788772 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.791624 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.791777 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.791834 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.791930 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.791996 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:46Z","lastTransitionTime":"2025-11-25T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.803892 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.805629 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: E1125 15:17:46.805748 4800 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.807515 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.807555 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.807567 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.807617 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.807630 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:46Z","lastTransitionTime":"2025-11-25T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.816507 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.833063 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.859936 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.875136 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.887274 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.899683 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.911273 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.911321 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.911332 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:46 crc kubenswrapper[4800]: 
I1125 15:17:46.911348 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.911359 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:46Z","lastTransitionTime":"2025-11-25T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.912200 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:850
6ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.928596 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.944312 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.958011 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.971309 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:46 crc kubenswrapper[4800]: I1125 15:17:46.988391 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:46Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.005411 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.014158 4800 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.014200 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.014211 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.014227 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.014240 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.021374 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.116251 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.116288 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.116297 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.116310 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.116319 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.219680 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.219711 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.219721 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.219734 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.219743 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.340467 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.340506 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.340514 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.340533 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.340542 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.443283 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.443324 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.443334 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.443349 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.443362 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.545756 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.545797 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.545808 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.545829 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.545856 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.648774 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.648812 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.648822 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.648856 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.648866 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.751426 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.751481 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.751493 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.751511 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.751525 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.768440 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-nzxgf" event={"ID":"0321f61a-9e40-47a2-b19f-a859fd6b890a","Type":"ContainerStarted","Data":"7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.772286 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.772409 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.772470 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.774180 4800 generic.go:334] "Generic (PLEG): container finished" podID="69669849-59a1-47d8-9583-4ed964926242" containerID="a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a" exitCode=0 Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.774299 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerDied","Data":"a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.777097 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.784746 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:47 crc kubenswrapper[4800]: E1125 15:17:47.784966 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.801599 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.824960 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd
60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.844295 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.857424 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.857478 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.857490 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.857508 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.857521 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.859710 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.877091 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.896296 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.916404 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.938417 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.955679 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.960184 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.960227 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.960240 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.960258 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.960270 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:47Z","lastTransitionTime":"2025-11-25T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.972329 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:47 crc kubenswrapper[4800]: I1125 15:17:47.990057 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:47Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.004442 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.018875 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.037497 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.051030 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.063231 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.063292 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.063302 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.063325 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.063334 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.066341 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.083686 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.119602 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.134325 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.148793 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.160444 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.166019 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.166060 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.166072 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.166095 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.166112 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.186007 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.202995 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.217143 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.236168 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z 
is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.248705 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.268790 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.268850 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.268859 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.268874 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.268884 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.283937 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.308555 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.371487 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.371541 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.371556 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.371575 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.371592 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.474997 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.475052 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.475076 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.475117 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.475141 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.489566 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.489716 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.489798 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.489899 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.489966 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.490180 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.490215 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.490242 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.490328 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:56.490300856 +0000 UTC m=+37.544709378 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491053 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:17:56.491031754 +0000 UTC m=+37.545440276 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491192 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491221 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491245 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491307 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:56.491286541 +0000 UTC m=+37.545695063 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491415 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491478 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:56.491459365 +0000 UTC m=+37.545867897 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491581 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.491645 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:56.49162057 +0000 UTC m=+37.546029112 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.578441 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.578528 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.578546 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.579008 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.579048 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.682483 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.682564 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.682585 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.682612 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.682631 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.783423 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerStarted","Data":"9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.784216 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.784350 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.784502 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:48 crc kubenswrapper[4800]: E1125 15:17:48.784607 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.786050 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.786075 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.786083 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.786097 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.786109 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.787501 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.787573 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.806735 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.820110 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.833487 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.846312 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.855664 4800 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.889250 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.889348 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.889375 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.889410 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.889436 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.910956 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.924541 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.947943 4800 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.968225 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.980391 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.992475 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.992526 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.992545 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.992569 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.992588 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:48Z","lastTransitionTime":"2025-11-25T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:48 crc kubenswrapper[4800]: I1125 15:17:48.994644 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\
\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:48Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.008949 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resou
rces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.024777 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.042512 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.095217 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.095287 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.095303 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.095327 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.095343 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.198341 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.198424 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.198444 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.198476 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.198499 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.301620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.301786 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.301817 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.301893 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.301921 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.405192 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.405837 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.405958 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.406030 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.406111 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.508962 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.509217 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.509310 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.509373 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.509428 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.612533 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.612589 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.612598 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.612611 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.612624 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.715438 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.715473 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.715484 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.715498 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.715506 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.785047 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:49 crc kubenswrapper[4800]: E1125 15:17:49.785282 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.793557 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.799829 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.809558 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.818678 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.818724 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.818774 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.818800 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.818815 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.830604 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.845999 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.860540 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.883104 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z 
is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.897257 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.908881 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.920592 4800 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.920634 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.920644 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.920680 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.920690 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:49Z","lastTransitionTime":"2025-11-25T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.925201 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.942460 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.954404 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.969222 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\"
:\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:49 crc kubenswrapper[4800]: I1125 15:17:49.988454 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc
35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:49Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.003464 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.023881 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.023944 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.023958 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.023982 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.024000 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.127564 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.127617 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.127628 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.127649 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.127661 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.230727 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.231370 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.231388 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.231416 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.231433 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.334216 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.334319 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.334341 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.334366 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.334384 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.437166 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.437225 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.437240 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.437257 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.437269 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.539477 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.539529 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.539541 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.539560 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.539572 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.644427 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.644490 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.644510 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.644537 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.644559 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.746922 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.746972 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.746989 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.747012 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.747031 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.793385 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:50 crc kubenswrapper[4800]: E1125 15:17:50.793797 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.793385 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:50 crc kubenswrapper[4800]: E1125 15:17:50.794228 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.803707 4800 generic.go:334] "Generic (PLEG): container finished" podID="69669849-59a1-47d8-9583-4ed964926242" containerID="9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c" exitCode=0 Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.803766 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerDied","Data":"9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.828730 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.849395 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf
5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.849667 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.849687 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.849698 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.849716 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.849728 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.863286 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.894355 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.909027 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.923104 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.938978 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.952580 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.952638 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.952652 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.952669 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.952684 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:50Z","lastTransitionTime":"2025-11-25T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.957435 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:50 crc kubenswrapper[4800]: I1125 15:17:50.992264 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:50Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.012118 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.023658 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.037703 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-l
ib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.050341 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.060236 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.060267 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.060276 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.062512 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.062831 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.062920 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.166297 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.166355 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.166367 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.166384 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.166397 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.268691 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.268731 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.268742 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.268759 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.268774 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.371212 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.371250 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.371258 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.371272 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.371281 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.474838 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.474928 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.474945 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.474971 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.474990 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.577897 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.577970 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.577982 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.578002 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.578014 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.680268 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.680318 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.680326 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.680341 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.680349 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.782583 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.782633 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.782644 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.782663 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.782677 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.785031 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:51 crc kubenswrapper[4800]: E1125 15:17:51.785189 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.810836 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.813362 4800 generic.go:334] "Generic (PLEG): container finished" podID="69669849-59a1-47d8-9583-4ed964926242" containerID="dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426" exitCode=0 Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.813435 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerDied","Data":"dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.828883 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.844084 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.855394 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-
dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.871736 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.885829 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.886334 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.886385 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.886406 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.886431 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.886453 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.903935 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.917392 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.938802 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.955753 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.971893 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.988674 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.988704 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.988712 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.988725 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.988734 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:51Z","lastTransitionTime":"2025-11-25T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:51 crc kubenswrapper[4800]: I1125 15:17:51.997079 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a42767
9e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:51Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.010097 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.024708 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.038790 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.091543 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.091590 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.091602 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.091620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.091633 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.194992 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.195069 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.195082 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.195104 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.195118 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.299010 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.299068 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.299078 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.299101 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.299111 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.403673 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.403782 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.403797 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.403817 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.403833 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.506683 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.506738 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.506752 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.506775 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.506789 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.610554 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.610597 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.610606 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.610622 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.610635 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.713695 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.713754 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.713771 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.713796 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.713812 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.785366 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:52 crc kubenswrapper[4800]: E1125 15:17:52.785583 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.785396 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:52 crc kubenswrapper[4800]: E1125 15:17:52.786304 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.816250 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.816300 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.816314 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.816337 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.816348 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.819707 4800 generic.go:334] "Generic (PLEG): container finished" podID="69669849-59a1-47d8-9583-4ed964926242" containerID="23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e" exitCode=0 Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.819832 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerDied","Data":"23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.839289 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.856522 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.875212 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.889395 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.903099 4800 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.918483 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.920003 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.920070 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.920083 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.920099 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.920110 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:52Z","lastTransitionTime":"2025-11-25T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
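
The payloads the kubelet is failing to apply above are strategic-merge patches: lists such as status.conditions are merged by their "type" key rather than replaced wholesale, and the $setElementOrder/conditions directive pins the list order on merge. A sketch of generating such a patch with the apimachinery helper follows; it assumes the k8s.io/api and k8s.io/apimachinery modules are on the module path, and the pod contents are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// Old and new pod states; the condition flip is invented for the example.
	oldPod := corev1.Pod{Status: corev1.PodStatus{Conditions: []corev1.PodCondition{
		{Type: corev1.PodReady, Status: corev1.ConditionFalse},
	}}}
	newPod := oldPod.DeepCopy()
	newPod.Status.Conditions[0].Status = corev1.ConditionTrue

	oldJSON, err := json.Marshal(oldPod)
	if err != nil {
		panic(err)
	}
	newJSON, err := json.Marshal(newPod)
	if err != nil {
		panic(err)
	}

	// Diff the two states into a strategic-merge patch. Merge-keyed lists
	// carry only the changed entries, which is why the kubelet payloads
	// above list just the conditions that moved.
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldJSON, newJSON, corev1.Pod{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}

For a change this small the helper may emit only the merged conditions entry; the $setElementOrder directive appears when the patch also has to fix the ordering of a merge-keyed list, as in the kubelet payloads in this log.
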
Has your network provider started?"} Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.935014 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.958249 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.975924 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:52 crc kubenswrapper[4800]: I1125 15:17:52.987215 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:52Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.005502 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.023470 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.023587 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.023631 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.023643 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.023669 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.023683 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.040681 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.061154 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.127031 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.127094 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.127109 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.127129 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.127145 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.230300 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.230845 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.230880 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.230902 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.230913 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.333270 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.333330 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.333398 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.333427 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.333450 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.436412 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.436443 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.436453 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.436465 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.436474 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.540287 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.540358 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.540384 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.540420 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.540445 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.645395 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.645473 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.645491 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.645520 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.645540 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.748578 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.748634 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.748646 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.748661 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.748675 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.784983 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:53 crc kubenswrapper[4800]: E1125 15:17:53.785292 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.828224 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerStarted","Data":"8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.834068 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.834845 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.834917 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.852876 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.852925 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.852938 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.852957 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.852971 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.854944 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.915723 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.918391 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.920542 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.937444 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.953483 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.955957 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.955990 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.956003 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.956021 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.956031 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:53Z","lastTransitionTime":"2025-11-25T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.966377 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.984140 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:53 crc kubenswrapper[4800]: I1125 15:17:53.998532 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:53Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.016443 4800 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.032991 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.046868 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.060404 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.060641 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.060657 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.060669 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.060684 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.060694 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.073613 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.087502 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.106610 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17
:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.122525 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.134800 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.149669 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.164389 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.165615 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.165655 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.165668 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.165691 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.165705 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.181156 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.202776 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.216356 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.236722 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.252382 4800 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.265257 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.270261 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.270329 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.270340 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.270359 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.270371 4800 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.275905 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.289108 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.302730 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.318504 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.373690 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.374027 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.374094 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.374215 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.374279 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.477968 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.478664 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.478704 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.478730 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.478836 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.582303 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.582416 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.582446 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.582477 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.582501 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.686471 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.686526 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.686541 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.686566 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.686581 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.784638 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.784709 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:17:54 crc kubenswrapper[4800]: E1125 15:17:54.784778 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:17:54 crc kubenswrapper[4800]: E1125 15:17:54.784964 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.789448 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.789470 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.789479 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.789491 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.789501 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.841943 4800 generic.go:334] "Generic (PLEG): container finished" podID="69669849-59a1-47d8-9583-4ed964926242" containerID="8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d" exitCode=0
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.842081 4800 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.842835 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerDied","Data":"8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d"}
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.858364 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.874739 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.892313 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.892345 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.892354 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.892370 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.892381 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.893613 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.907175 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.928411 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.943463 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.965761 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.982696 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.995941 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.995986 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.995999 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.996017 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:54 crc kubenswrapper[4800]: I1125 15:17:54.996029 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:54Z","lastTransitionTime":"2025-11-25T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.003355 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:54Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.026816 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.041449 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.059879 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.086108 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcda
d2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.101336 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.101405 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.101421 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.101589 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.101610 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.105884 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.204317 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.204363 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.204372 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.204389 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.204402 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.306729 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.306792 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.306805 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.306827 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.306860 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.374649 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk"] Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.375514 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.378434 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.378885 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.399701 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.409635    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.409691    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.409703    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.409729    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.409743    4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.415553    4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.429577    4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.442611 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.456144 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.473900 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.486836    4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/190f9d43-ec5b-488c-92fb-d522b746a16d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.486909    4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/190f9d43-ec5b-488c-92fb-d522b746a16d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.486967    4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twfcf\" (UniqueName: \"kubernetes.io/projected/190f9d43-ec5b-488c-92fb-d522b746a16d-kube-api-access-twfcf\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.486988    4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/190f9d43-ec5b-488c-92fb-d522b746a16d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.487993    4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.513633    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.513685    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.513695    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.513714    4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.513728    4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.515721    4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcda
d2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.535523 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.554715 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.579227 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.588507 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twfcf\" (UniqueName: \"kubernetes.io/projected/190f9d43-ec5b-488c-92fb-d522b746a16d-kube-api-access-twfcf\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.588574 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/190f9d43-ec5b-488c-92fb-d522b746a16d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.588623 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/190f9d43-ec5b-488c-92fb-d522b746a16d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.588651 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/190f9d43-ec5b-488c-92fb-d522b746a16d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.589532 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/190f9d43-ec5b-488c-92fb-d522b746a16d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.589616 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/190f9d43-ec5b-488c-92fb-d522b746a16d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.597411 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.603826 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/190f9d43-ec5b-488c-92fb-d522b746a16d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.607328 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twfcf\" (UniqueName: \"kubernetes.io/projected/190f9d43-ec5b-488c-92fb-d522b746a16d-kube-api-access-twfcf\") pod \"ovnkube-control-plane-749d76644c-895nk\" (UID: \"190f9d43-ec5b-488c-92fb-d522b746a16d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.614517 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.616417 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.616471 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.616483 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.616504 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.616516 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.631782 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v9
5d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.645486 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.693289 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" Nov 25 15:17:55 crc kubenswrapper[4800]: W1125 15:17:55.712221 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod190f9d43_ec5b_488c_92fb_d522b746a16d.slice/crio-e56a8376e11568fed15d92b5453a7f978d2297659c1391c9b6ccc2e12f078feb WatchSource:0}: Error finding container e56a8376e11568fed15d92b5453a7f978d2297659c1391c9b6ccc2e12f078feb: Status 404 returned error can't find the container with id e56a8376e11568fed15d92b5453a7f978d2297659c1391c9b6ccc2e12f078feb Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.720602 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.720646 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.720662 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.720680 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.720692 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.784723 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:55 crc kubenswrapper[4800]: E1125 15:17:55.784901 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.823665 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.824034 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.824225 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.824342 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.824426 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.849534 4800 generic.go:334] "Generic (PLEG): container finished" podID="69669849-59a1-47d8-9583-4ed964926242" containerID="50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd" exitCode=0 Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.849623 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerDied","Data":"50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.853469 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" event={"ID":"190f9d43-ec5b-488c-92fb-d522b746a16d","Type":"ContainerStarted","Data":"e56a8376e11568fed15d92b5453a7f978d2297659c1391c9b6ccc2e12f078feb"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.853576 4800 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.869496 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.885795 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.901368 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.915879 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.927162 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.927217 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.927237 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.927261 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.927276 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:55Z","lastTransitionTime":"2025-11-25T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.930915 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.947343 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.960443 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.977607 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:55 crc kubenswrapper[4800]: I1125 15:17:55.996295 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:55Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.025183 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.030020 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.030065 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.030078 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.030101 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.030115 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.047983 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.070264 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.089572 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.109810 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.125768 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.133655 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.134124 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.134288 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.134422 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.134531 4800 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.237420 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.237508 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.237526 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.237548 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.237560 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.340566 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.340902 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.340992 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.341082 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.341368 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.445381 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.445447 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.445461 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.445491 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.445510 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.498345 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.498520 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.498563 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.498600 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.498630 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.498819 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.498847 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.498887 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.498958 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:12.498937414 +0000 UTC m=+53.553345916 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.499442 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.499467 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.499477 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.499510 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:12.499500469 +0000 UTC m=+53.553908961 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.499646 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:12.499602891 +0000 UTC m=+53.554011373 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.499651 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.499737 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:12.499726404 +0000 UTC m=+53.554134886 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.500236 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.500484 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:12.500452113 +0000 UTC m=+53.554860635 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.516388 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-fjqzf"] Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.517163 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.517267 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.533596 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.548484 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.548536 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.548545 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.548565 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.548578 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.549363 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.569166 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},
{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.586296 4800 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.599227 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g524\" (UniqueName: \"kubernetes.io/projected/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-kube-api-access-8g524\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.599495 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.600964 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.618490 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.633504 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc 
kubenswrapper[4800]: I1125 15:17:56.652022 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.652077 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.652088 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.652109 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.652124 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.653843 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.680090 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.694129 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.700392 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g524\" (UniqueName: \"kubernetes.io/projected/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-kube-api-access-8g524\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.700494 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.700674 4800 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.700749 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs podName:3e0ee245-1a7f-4428-bbd9-50de79d2cbd8 nodeName:}" failed. 
No retries permitted until 2025-11-25 15:17:57.200726214 +0000 UTC m=+38.255134706 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs") pod "network-metrics-daemon-fjqzf" (UID: "3e0ee245-1a7f-4428-bbd9-50de79d2cbd8") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.719563 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf
5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.723468 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g524\" (UniqueName: \"kubernetes.io/projected/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-kube-api-access-8g524\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.737428 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.755901 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.756566 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.756624 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.756639 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.756661 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.756677 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.771814 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.785417 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.785460 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.785644 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.785867 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.790137 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.805485 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.858580 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" event={"ID":"190f9d43-ec5b-488c-92fb-d522b746a16d","Type":"ContainerStarted","Data":"ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.858646 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" event={"ID":"190f9d43-ec5b-488c-92fb-d522b746a16d","Type":"ContainerStarted","Data":"e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.867628 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.867715 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.867732 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.867775 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.867792 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.870544 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" event={"ID":"69669849-59a1-47d8-9583-4ed964926242","Type":"ContainerStarted","Data":"a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.875471 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.875505 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.875516 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.875529 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.875541 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.879103 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.892420 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.896598 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.898036 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.898064 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.898456 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.898790 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.898819 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.911567 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.912331 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.920715 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.920762 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.920774 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.920798 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.920812 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.926153 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.932939 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.938271 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.938339 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.938353 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.938372 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.938385 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.939865 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.952425 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.955111 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.956696 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.956726 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.956737 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.956757 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.956770 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.970166 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.972327 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:56 crc kubenswrapper[4800]: E1125 15:17:56.972581 4800 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.974866 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.974922 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.974934 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.974953 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.974964 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:56Z","lastTransitionTime":"2025-11-25T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:56 crc kubenswrapper[4800]: I1125 15:17:56.987366 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf
31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.001328 4800 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:56Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.018032 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner 
reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.035869 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.054238 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.073067 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.078229 4800 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.078272 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.078282 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.078300 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.078310 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.089775 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.107057 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.133735 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcda
d2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.157437 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.174792 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.181612 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.181682 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.181699 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.181724 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.181742 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.192522 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.207795 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:57 crc kubenswrapper[4800]: E1125 15:17:57.208046 4800 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:17:57 crc kubenswrapper[4800]: E1125 15:17:57.208133 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs podName:3e0ee245-1a7f-4428-bbd9-50de79d2cbd8 nodeName:}" failed. No retries permitted until 2025-11-25 15:17:58.208105228 +0000 UTC m=+39.262513730 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs") pod "network-metrics-daemon-fjqzf" (UID: "3e0ee245-1a7f-4428-bbd9-50de79d2cbd8") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.208270 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.234784 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.253482 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.273387 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.285740 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.285793 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.285808 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.285840 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.285886 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.293988 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.316895 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.341373 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.362326 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.389372 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.389436 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.389457 4800 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.389487 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.389508 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.393782 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.408703 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.428740 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.445120 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.472515 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcda
d2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.493559 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.493620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.493638 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.493665 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.493687 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.599512 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.599565 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.599583 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.599650 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.599667 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.702824 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.702926 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.702949 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.702979 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.703001 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.785458 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.785478 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:57 crc kubenswrapper[4800]: E1125 15:17:57.785758 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:17:57 crc kubenswrapper[4800]: E1125 15:17:57.785983 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.805826 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.805933 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.805947 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.805970 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.805987 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.878571 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/0.log" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.882126 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91" exitCode=1 Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.882235 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.883548 4800 scope.go:117] "RemoveContainer" containerID="ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.897790 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.909381 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.909445 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.909456 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.909478 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.909491 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:57Z","lastTransitionTime":"2025-11-25T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.915929 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.929933 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.945120 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.960314 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.977686 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:57 crc kubenswrapper[4800]: I1125 15:17:57.990583 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:57Z is after 2025-08-24T17:21:41Z" Nov 25 
15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.004258 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.012720 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.012765 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.012775 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.012796 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.012808 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.020284 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.035098 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.050727 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.070116 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.091190 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.108165 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.116221 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.116267 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.116280 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.116299 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.116309 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.132026 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"ormers/factory.go:160\\\\nI1125 15:17:56.847973 6048 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848258 6048 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848380 6048 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:17:56.848401 6048 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 15:17:56.848413 6048 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 15:17:56.848443 6048 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:17:56.848449 6048 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848467 6048 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 15:17:56.848505 6048 factory.go:656] Stopping watch factory\\\\nI1125 15:17:56.848511 6048 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:17:56.848534 6048 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:17:56.848526 6048 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:17:56.848544 6048 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:17:56.848552 6048 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.145177 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:58Z is after 2025-08-24T17:21:41Z"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.219832 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.219926 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.219944 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.219973 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.219982 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.219994 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:58 crc kubenswrapper[4800]: E1125 15:17:58.220213 4800 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 15:17:58 crc kubenswrapper[4800]: E1125 15:17:58.220290 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs podName:3e0ee245-1a7f-4428-bbd9-50de79d2cbd8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:00.220265369 +0000 UTC m=+41.274673851 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs") pod "network-metrics-daemon-fjqzf" (UID: "3e0ee245-1a7f-4428-bbd9-50de79d2cbd8") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.324768 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.324815 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.324830 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.324880 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.324905 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.428254 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.428333 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.428356 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.428389 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.428413 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.532598 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.532686 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.532714 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.532756 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.532805 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.636284 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.636350 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.636370 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.636393 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.636408 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.740043 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.740149 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.740173 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.740213 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.740239 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.784916 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.784971 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:17:58 crc kubenswrapper[4800]: E1125 15:17:58.785325 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:17:58 crc kubenswrapper[4800]: E1125 15:17:58.785563 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.785801 4800 scope.go:117] "RemoveContainer" containerID="25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.843069 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.843132 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.843150 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.843178 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.843196 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.946956 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.947113 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.947138 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.947168 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:58 crc kubenswrapper[4800]: I1125 15:17:58.947191 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:58Z","lastTransitionTime":"2025-11-25T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.051924 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.051992 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.052017 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.052056 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.052088 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.156113 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.156182 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.156195 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.156214 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.156227 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.259708 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.259782 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.259802 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.259836 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.259898 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.362415 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.362483 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.362504 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.362524 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.362537 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.466092 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.466170 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.466189 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.466219 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.466248 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.570702 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.570788 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.570807 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.570837 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.570895 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.677618 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.677679 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.677697 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.677725 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.677746 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.781170 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.781241 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.781258 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.781286 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.781307 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.786838 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.786888 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:17:59 crc kubenswrapper[4800]: E1125 15:17:59.787167 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:17:59 crc kubenswrapper[4800]: E1125 15:17:59.787493 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.811954 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.831894 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.863613 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcda
d2f2d400317688d6c9169b91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"ormers/factory.go:160\\\\nI1125 15:17:56.847973 6048 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848258 6048 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848380 6048 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:17:56.848401 6048 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 15:17:56.848413 6048 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 15:17:56.848443 6048 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:17:56.848449 6048 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848467 6048 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 15:17:56.848505 6048 factory.go:656] Stopping watch factory\\\\nI1125 15:17:56.848511 6048 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:17:56.848534 6048 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:17:56.848526 6048 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:17:56.848544 6048 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:17:56.848552 6048 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.884366 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.884929 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.885000 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.885022 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.885055 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.885076 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.899681 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\
\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.920665 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.940097 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.958943 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.974131 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.988970 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.989016 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.989034 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.989056 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.989069 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:17:59Z","lastTransitionTime":"2025-11-25T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:17:59 crc kubenswrapper[4800]: I1125 15:17:59.997161 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:17:59Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.013080 4800 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T15:18:00Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.034792 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:00Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.052143 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:00Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.072927 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:00Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.090968 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:00Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.091948 4800 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.091999 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.092019 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.092046 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.092061 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.105232 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:00Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.196152 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.196219 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.196234 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.196257 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.196273 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.245626 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:00 crc kubenswrapper[4800]: E1125 15:18:00.245990 4800 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:18:00 crc kubenswrapper[4800]: E1125 15:18:00.246141 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs podName:3e0ee245-1a7f-4428-bbd9-50de79d2cbd8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:04.24610163 +0000 UTC m=+45.300510152 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs") pod "network-metrics-daemon-fjqzf" (UID: "3e0ee245-1a7f-4428-bbd9-50de79d2cbd8") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.299946 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.300005 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.300023 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.300061 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.300081 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.403045 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.403099 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.403111 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.403132 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.403149 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.506600 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.506651 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.506662 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.506694 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.506708 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.609645 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.609752 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.609778 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.609820 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.609904 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.712868 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.712931 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.712946 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.712968 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.712982 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.785105 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.785316 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:00 crc kubenswrapper[4800]: E1125 15:18:00.785363 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:00 crc kubenswrapper[4800]: E1125 15:18:00.785586 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.816265 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.816327 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.816344 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.816370 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.816389 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.900834 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/0.log" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.904742 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c"} Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.918946 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.919011 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.919032 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.919057 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:00 crc kubenswrapper[4800]: I1125 15:18:00.919077 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:00Z","lastTransitionTime":"2025-11-25T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.022015 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.022098 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.022108 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.022128 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.022140 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.125775 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.125808 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.125817 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.125835 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.125864 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.230437 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.230484 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.230497 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.230517 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.230531 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.333983 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.334031 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.334040 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.334056 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.334067 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.436639 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.436729 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.436749 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.436783 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.436801 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.539566 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.539637 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.539661 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.539693 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.539716 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.644063 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.644476 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.644492 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.644513 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.644529 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.747889 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.747936 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.747949 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.747965 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.747980 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.784998 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.785195 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 15:18:01 crc kubenswrapper[4800]: E1125 15:18:01.785388 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8"
Nov 25 15:18:01 crc kubenswrapper[4800]: E1125 15:18:01.785484 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.851400 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.851467 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.851483 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.851502 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.851515 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.911453 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.914345 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.914457 4800 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.933691 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:01Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.949008 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:01Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.956165 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.956206 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.956214 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.956236 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.956247 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:01Z","lastTransitionTime":"2025-11-25T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.963813 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:01Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.976744 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:01Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:01 crc kubenswrapper[4800]: I1125 15:18:01.986956 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:01Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.000533 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:01Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.014389 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.040516 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"ormers/factory.go:160\\\\nI1125 15:17:56.847973 6048 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848258 6048 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848380 6048 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:17:56.848401 6048 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 15:17:56.848413 6048 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 15:17:56.848443 6048 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:17:56.848449 6048 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848467 6048 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 15:17:56.848505 6048 factory.go:656] Stopping watch factory\\\\nI1125 15:17:56.848511 6048 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:17:56.848534 6048 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:17:56.848526 6048 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:17:56.848544 6048 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:17:56.848552 6048 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.062811 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.065458 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.065524 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.065539 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.065563 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.065577 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.077018 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.093777 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.126900 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.149029 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.167991 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.168027 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.168038 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.168056 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.168066 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.169186 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.187653 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.202052 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.271020 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.271073 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.271083 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.271100 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.271111 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.374618 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.374687 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.374701 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.374728 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.374745 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.478623 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.478691 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.478706 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.478739 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.478755 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.581451 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.581510 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.581526 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.581547 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.581561 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.684714 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.684760 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.684769 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.684785 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.684795 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.785494 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.785503 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:02 crc kubenswrapper[4800]: E1125 15:18:02.785779 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:02 crc kubenswrapper[4800]: E1125 15:18:02.785879 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.787363 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.787414 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.787427 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.787447 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.787460 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.890244 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.890288 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.890300 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.890316 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.890326 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.918388 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.933007 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.952297 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.985973 4800 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"ormers/factory.go:160\\\\nI1125 15:17:56.847973 6048 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848258 6048 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848380 6048 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:17:56.848401 6048 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 15:17:56.848413 6048 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 15:17:56.848443 6048 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:17:56.848449 6048 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848467 6048 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 15:17:56.848505 6048 factory.go:656] Stopping watch factory\\\\nI1125 15:17:56.848511 6048 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:17:56.848534 6048 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:17:56.848526 6048 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:17:56.848544 6048 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:17:56.848552 6048 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:02Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.992948 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.993006 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.993024 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.993050 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:02 crc kubenswrapper[4800]: I1125 15:18:02.993070 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:02Z","lastTransitionTime":"2025-11-25T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.007934 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.023952 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.044743 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.063406 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc 
kubenswrapper[4800]: I1125 15:18:03.087195 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.096710 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.096769 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.096794 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.096826 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.096895 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.108709 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.135407 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.150713 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.172673 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.192637 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.200314 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.200374 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.200388 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.200410 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.200427 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.213774 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.235630 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.251252 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:03Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.304652 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.304728 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.304746 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.304775 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.304796 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.407976 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.408068 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.408089 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.408119 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.408178 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.511339 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.511420 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.511437 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.511466 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.511485 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.614945 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.614998 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.615009 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.615028 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.615042 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.718328 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.718400 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.718424 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.718459 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.718486 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.784519 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.784519 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 15:18:03 crc kubenswrapper[4800]: E1125 15:18:03.784900 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8"
Nov 25 15:18:03 crc kubenswrapper[4800]: E1125 15:18:03.785093 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.823143 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.823208 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.823220 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.823242 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.823259 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.927261 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.927317 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.927331 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.927352 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:03 crc kubenswrapper[4800]: I1125 15:18:03.927365 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:03Z","lastTransitionTime":"2025-11-25T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.030595 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.030668 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.030682 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.030710 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.030726 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.133814 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.133917 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.133937 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.133970 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.133985 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.236931 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.236987 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.237000 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.237024 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.237036 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.301941 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:04 crc kubenswrapper[4800]: E1125 15:18:04.302124 4800 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 15:18:04 crc kubenswrapper[4800]: E1125 15:18:04.302201 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs podName:3e0ee245-1a7f-4428-bbd9-50de79d2cbd8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:12.302181289 +0000 UTC m=+53.356589771 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs") pod "network-metrics-daemon-fjqzf" (UID: "3e0ee245-1a7f-4428-bbd9-50de79d2cbd8") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.339345 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.339393 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.339405 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.339425 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.339438 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.442527 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.442628 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.442647 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.442676 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.442702 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.545551 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.546094 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.546109 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.546132 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.546146 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.650057 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.650099 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.650107 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.650124 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.650135 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.753199 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.753246 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.753257 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.753277 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.753289 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.784811 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.784943 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:04 crc kubenswrapper[4800]: E1125 15:18:04.785071 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:18:04 crc kubenswrapper[4800]: E1125 15:18:04.785154 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.856473 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.856540 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.856554 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.856578 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.856594 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.928475 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/1.log"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.929308 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/0.log"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.932790 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c" exitCode=1
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.932872 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c"}
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.932936 4800 scope.go:117] "RemoveContainer" containerID="ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.933938 4800 scope.go:117] "RemoveContainer" containerID="4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c"
Nov 25 15:18:04 crc kubenswrapper[4800]: E1125 15:18:04.934156 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82"
Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.951594 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:04Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.959658 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.959720 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.959730 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.959750 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.959762 4800 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:04Z","lastTransitionTime":"2025-11-25T15:18:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.975251 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:04Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:04 crc kubenswrapper[4800]: I1125 15:18:04.993705 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:04Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.011272 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.026231 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.043981 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.058712 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.063214 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.063254 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.063265 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.063284 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.063298 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.082930 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc7a2e86032a2547a4bc8d0fbae082550cbbcdad2f2d400317688d6c9169b91\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"ormers/factory.go:160\\\\nI1125 15:17:56.847973 6048 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848258 6048 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848380 6048 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:17:56.848401 6048 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 15:17:56.848413 6048 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 15:17:56.848443 6048 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:17:56.848449 6048 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:17:56.848467 6048 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 15:17:56.848505 6048 factory.go:656] Stopping watch factory\\\\nI1125 15:17:56.848511 6048 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:17:56.848534 6048 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:17:56.848526 6048 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:17:56.848544 6048 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:17:56.848552 6048 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\
\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.102721 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.117418 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.135752 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.155116 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc 
kubenswrapper[4800]: I1125 15:18:05.167232 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.167298 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.167332 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.167360 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.167370 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.172484 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\
\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.189374 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.216522 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.238348 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:05Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.271196 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.271773 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.271965 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.272119 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.272283 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.375339 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.375397 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.375410 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.375431 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.375446 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.479363 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.479431 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.479441 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.479463 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.479476 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.583130 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.583176 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.583188 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.583208 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.583224 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.686593 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.686655 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.686666 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.686686 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.686698 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.785027 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.785212 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:05 crc kubenswrapper[4800]: E1125 15:18:05.785304 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:05 crc kubenswrapper[4800]: E1125 15:18:05.785485 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.790870 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.790933 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.790949 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.790967 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.790983 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.893769 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.893827 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.893870 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.893890 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.893906 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.953555 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/1.log"
Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.997832 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.997921 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.997938 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.997961 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:05 crc kubenswrapper[4800]: I1125 15:18:05.997978 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:05Z","lastTransitionTime":"2025-11-25T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.101175 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.101225 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.101258 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.101279 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.101292 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.204890 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.204949 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.204965 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.204992 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.205008 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.307873 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.307942 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.307959 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.307988 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.308007 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.412331 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.412476 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.412505 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.412552 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.412585 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.517233 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.517313 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.517337 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.517372 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.517398 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.620597 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.620666 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.620681 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.620706 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.620723 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.724432 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.724499 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.724515 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.724539 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.724555 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.784765 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.784989 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:06 crc kubenswrapper[4800]: E1125 15:18:06.785080 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:06 crc kubenswrapper[4800]: E1125 15:18:06.785315 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.827777 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.827910 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.827938 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.827973 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.827999 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.931964 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.932025 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.932038 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.932064 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:06 crc kubenswrapper[4800]: I1125 15:18:06.932078 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:06Z","lastTransitionTime":"2025-11-25T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.035143 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.035214 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.035236 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.035268 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.035292 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.052221 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.052269 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.052292 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.052320 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.052342 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.075488 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:07Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.082986 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.083045 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.083065 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.083096 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.083121 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.103956 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:07Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.109682 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.109745 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.109758 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.109780 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.109796 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.126820 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:07Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.131715 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.131936 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.132003 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.132073 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.132131 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.146077 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:07Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.150318 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.150362 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.150379 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.150400 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.150414 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.169762 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:07Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.169970 4800 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.172189 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.172247 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.172269 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.172295 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.172311 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.275614 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.275671 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.275683 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.275708 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.275724 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.379137 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.379196 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.379211 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.379234 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.379248 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.482109 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.482185 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.482208 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.482243 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.482266 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.585692 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.585772 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.585797 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.585829 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.585877 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.688891 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.688967 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.689048 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.689089 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.689119 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.785535 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.785757 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.786036 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:07 crc kubenswrapper[4800]: E1125 15:18:07.786375 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.794013 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.794070 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.794082 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.794099 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.794110 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.898117 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.898211 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.898236 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.898280 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:07 crc kubenswrapper[4800]: I1125 15:18:07.898313 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:07Z","lastTransitionTime":"2025-11-25T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.001559 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.001659 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.001685 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.001725 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.001753 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.104834 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.104935 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.104954 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.104983 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.105007 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.209031 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.209105 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.209123 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.209154 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.209175 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.313025 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.313109 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.313132 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.313163 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.313185 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.416886 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.416946 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.416957 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.416977 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.416989 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.521197 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.521278 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.521303 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.521332 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.521353 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.624414 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.624493 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.624517 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.624549 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.624572 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.728563 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.728672 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.728692 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.728722 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.728741 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.784648 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.784746 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:08 crc kubenswrapper[4800]: E1125 15:18:08.784975 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:08 crc kubenswrapper[4800]: E1125 15:18:08.785059 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.833390 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.833446 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.833459 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.833481 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.833494 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.937928 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.938002 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.938020 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.938049 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.938070 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:08Z","lastTransitionTime":"2025-11-25T15:18:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.980786 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:18:08 crc kubenswrapper[4800]: I1125 15:18:08.982379 4800 scope.go:117] "RemoveContainer" containerID="4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c" Nov 25 15:18:08 crc kubenswrapper[4800]: E1125 15:18:08.982733 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.007171 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.042131 4800 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.042181 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.042190 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.042208 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.042217 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.046057 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7
b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.067651 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.085813 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.106279 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.125235 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.146515 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.146617 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.146640 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.146671 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.146701 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.149920 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.171579 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.200449 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.219907 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.240835 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.250167 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.250227 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.250241 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.250261 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.250292 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.264404 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.286090 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.305721 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.333062 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.353236 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.353317 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.353340 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.353371 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.353394 4800 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.355139 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.458006 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.458088 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.458106 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.458135 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.458155 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.561514 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.561593 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.561607 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.561634 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.561654 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.665180 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.665269 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.665339 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.665380 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.665403 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.769306 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.769412 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.769439 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.769474 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.769502 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.785041 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:09 crc kubenswrapper[4800]: E1125 15:18:09.785289 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.785073 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:09 crc kubenswrapper[4800]: E1125 15:18:09.786646 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.810704 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.833161 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.853460 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.870626 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.873340 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.873492 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.873581 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.873685 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.873795 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.895011 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.915021 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.935249 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.970305 4800 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.975529 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.975564 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.975574 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.975588 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.975598 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:09Z","lastTransitionTime":"2025-11-25T15:18:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:09 crc kubenswrapper[4800]: I1125 15:18:09.988538 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:09Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.003585 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:10Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.021425 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:10Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.036230 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:10Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc 
kubenswrapper[4800]: I1125 15:18:10.053620 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:10Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.073153 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:10Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.078591 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.078646 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc 
kubenswrapper[4800]: I1125 15:18:10.078658 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.078680 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.078733 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.091432 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:1
7:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:10Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.124066 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:10Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.184061 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.184134 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.184148 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.184173 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.184188 4800 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.287336 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.287417 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.287437 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.287467 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.287487 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.391299 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.391369 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.391403 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.391429 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.391444 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.494187 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.494258 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.494274 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.494303 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.494318 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.597748 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.597808 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.597822 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.597861 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.597875 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.701214 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.701264 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.701275 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.701296 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.701308 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.785226 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.785312 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:10 crc kubenswrapper[4800]: E1125 15:18:10.785432 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:10 crc kubenswrapper[4800]: E1125 15:18:10.785534 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.804454 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.804526 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.804541 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.804562 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.804576 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.908218 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.908273 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.908287 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.908308 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:10 crc kubenswrapper[4800]: I1125 15:18:10.908325 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:10Z","lastTransitionTime":"2025-11-25T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.011432 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.011506 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.011527 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.011555 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.011575 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.116900 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.117009 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.117032 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.117059 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.117079 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.220692 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.220774 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.220797 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.220827 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.220890 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.325194 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.325258 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.325272 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.325297 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.325316 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.429055 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.429118 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.429142 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.429172 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.429189 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.532902 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.532965 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.532979 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.533009 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.533024 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.636008 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.636099 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.636118 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.636152 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.636174 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.739429 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.739488 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.739498 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.739516 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.739533 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.784975 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.785078 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:11 crc kubenswrapper[4800]: E1125 15:18:11.785235 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 15:18:11 crc kubenswrapper[4800]: E1125 15:18:11.785570 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.843482 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.843558 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.843571 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.843612 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.843624 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.946809 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.946889 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.946920 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.946941 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:11 crc kubenswrapper[4800]: I1125 15:18:11.946953 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:11Z","lastTransitionTime":"2025-11-25T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.051079 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.051133 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.051149 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.051170 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.051182 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.154181 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.154243 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.154257 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.154279 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.154293 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.258096 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.258169 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.258196 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.258221 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.258243 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.323235 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.323519 4800 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.323652 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs podName:3e0ee245-1a7f-4428-bbd9-50de79d2cbd8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:28.323622601 +0000 UTC m=+69.378031073 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs") pod "network-metrics-daemon-fjqzf" (UID: "3e0ee245-1a7f-4428-bbd9-50de79d2cbd8") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.367715 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.367823 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.367933 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.367964 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.367978 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.475826 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.475892 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.475901 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.475922 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.475933 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.527617 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.527811 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.527862 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:44.527806712 +0000 UTC m=+85.582215194 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.527906 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.527952 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.527996 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528111 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528113 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528157 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528171 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528182 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528196 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:44.528182692 +0000 UTC m=+85.582591174 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528199 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528218 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528251 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:44.528227343 +0000 UTC m=+85.582635845 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528240 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528283 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:44.528269194 +0000 UTC m=+85.582677696 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.528458 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:18:44.528414408 +0000 UTC m=+85.582822920 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.579698 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.579790 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.579808 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.579835 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.579867 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.683238 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.683282 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.683298 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.683326 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.683338 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.784399 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.784631 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.784423 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:12 crc kubenswrapper[4800]: E1125 15:18:12.784980 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.787413 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.787500 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.787521 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.787551 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.787571 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.908637 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.908710 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.908727 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.908760 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:12 crc kubenswrapper[4800]: I1125 15:18:12.908779 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:12Z","lastTransitionTime":"2025-11-25T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.011545 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.011607 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.011620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.011642 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.011656 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.114800 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.114923 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.114949 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.114985 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.115007 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.219057 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.219130 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.219148 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.219177 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.219196 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.323092 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.323192 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.323230 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.323268 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.323296 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.427232 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.427287 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.427301 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.427325 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.427340 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.530760 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.530805 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.530819 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.530837 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.530870 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.634968 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.635058 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.635080 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.635111 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.635135 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.738406 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.738489 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.738507 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.738541 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.738559 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.741030 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.760508 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.763478 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.781629 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.785141 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:13 crc kubenswrapper[4800]: E1125 15:18:13.785346 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.785146 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:13 crc kubenswrapper[4800]: E1125 15:18:13.785682 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.816488 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.839479 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.841940 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.842018 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.842063 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.842090 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.842105 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.857629 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.881680 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name
\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.900435 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.919881 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.945689 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.945748 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.945766 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.945792 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.945811 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:13Z","lastTransitionTime":"2025-11-25T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.947057 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.974089 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:13 crc kubenswrapper[4800]: I1125 15:18:13.993616 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:13Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.015890 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:14Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.034426 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:14Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.050017 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.050081 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.050095 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.050119 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.050134 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.050833 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:14Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.068372 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:14Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.085527 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:14Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.153760 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.153883 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.153900 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.153929 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.153974 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.257332 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.257397 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.257415 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.257445 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.257462 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.360946 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.361027 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.361047 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.361081 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.361103 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.465702 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.465788 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.465814 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.465890 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.465916 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.570675 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.570745 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.570763 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.570791 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.570814 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.675314 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.675364 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.675374 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.675392 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.675402 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.778814 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.778923 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.778941 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.778972 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.778992 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.785106 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.785226 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:14 crc kubenswrapper[4800]: E1125 15:18:14.785261 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:14 crc kubenswrapper[4800]: E1125 15:18:14.785505 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.882465 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.882550 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.882585 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.882626 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.882650 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.986157 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.986233 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.986252 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.986289 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:14 crc kubenswrapper[4800]: I1125 15:18:14.986311 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:14Z","lastTransitionTime":"2025-11-25T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.088993 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.089084 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.089100 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.089124 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.089140 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.192400 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.192468 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.192487 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.192515 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.192536 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.296235 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.296335 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.296357 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.296390 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.296412 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.399866 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.399925 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.399941 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.399968 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.399985 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.503406 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.503553 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.503576 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.503605 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.503622 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.606957 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.607018 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.607036 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.607059 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.607076 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.709739 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.709839 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.709907 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.709937 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.709958 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.784830 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.784925 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:15 crc kubenswrapper[4800]: E1125 15:18:15.785080 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:15 crc kubenswrapper[4800]: E1125 15:18:15.785901 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.813385 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.813438 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.813450 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.813469 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.813485 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.917011 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.917095 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.917118 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.917148 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:15 crc kubenswrapper[4800]: I1125 15:18:15.917170 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:15Z","lastTransitionTime":"2025-11-25T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.019862 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.019923 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.019940 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.019965 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.019983 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.122284 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.122355 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.122372 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.122398 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.122433 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.226346 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.226417 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.226434 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.226464 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.226487 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.330466 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.330547 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.330573 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.330610 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.330635 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.434893 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.434955 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.434966 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.434988 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.434999 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.538874 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.538946 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.538964 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.538990 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.539013 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.642184 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.642258 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.642267 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.642286 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.642297 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.746189 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.746293 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.746321 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.746351 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.746371 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.785302 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:16 crc kubenswrapper[4800]: E1125 15:18:16.785517 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.785812 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:16 crc kubenswrapper[4800]: E1125 15:18:16.785971 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.862014 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.862090 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.862114 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.862147 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.862205 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.965914 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.965979 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.965994 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.966012 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:16 crc kubenswrapper[4800]: I1125 15:18:16.966024 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:16Z","lastTransitionTime":"2025-11-25T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.070158 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.070226 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.070243 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.070260 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.070293 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.153150 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.170742 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.173570 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.173622 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.173642 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.173666 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.173685 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.185193 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.215469 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.235293 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.251828 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.270504 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.277790 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.277852 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.277863 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.277881 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.277893 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.296767 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.296824 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.296872 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.296902 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.296922 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.301929 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.314317 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154a
fa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.326472 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.331188 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.331264 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.331302 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.331327 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.331341 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.350121 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.352872 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.358255 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.358312 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.358328 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.358351 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.358367 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.366738 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.374323 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.379072 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.379116 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.379129 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.379152 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.379170 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.383735 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.397468 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.401348 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.401421 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.401438 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.401461 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.401472 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.402036 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b
6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.413927 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.414029 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.414149 4800 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.417075 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.418039 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.418071 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.418106 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.418127 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.427609 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.452937 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.469694 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.482116 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:17Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.521112 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.521195 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.521209 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.521233 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.521249 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.624610 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.624654 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.624664 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.624679 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.624689 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.727859 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.727907 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.727918 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.727942 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.727955 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.785341 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.785398 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.785609 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:17 crc kubenswrapper[4800]: E1125 15:18:17.785800 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.831631 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.831672 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.831680 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.831698 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.831711 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.934651 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.934696 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.934706 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.934726 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:17 crc kubenswrapper[4800]: I1125 15:18:17.934737 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:17Z","lastTransitionTime":"2025-11-25T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.037776 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.037827 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.037869 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.037890 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.037903 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.140111 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.140147 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.140161 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.140178 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.140193 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.242699 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.242733 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.242744 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.242759 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.242770 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.346799 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.346877 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.346892 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.346916 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.346931 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.451009 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.451053 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.451065 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.451081 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.451092 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.553884 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.553950 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.553970 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.554001 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.554023 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.657885 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.657959 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.657981 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.658011 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.658034 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.761217 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.761262 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.761273 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.761290 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.761300 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.785186 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.785203 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:18 crc kubenswrapper[4800]: E1125 15:18:18.785505 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:18 crc kubenswrapper[4800]: E1125 15:18:18.785743 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.864809 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.864916 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.864936 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.864963 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.864980 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.969991 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.970063 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.970075 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.970104 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:18 crc kubenswrapper[4800]: I1125 15:18:18.970118 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:18Z","lastTransitionTime":"2025-11-25T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.073976 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.074053 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.074071 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.074103 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.074124 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.178689 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.178753 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.178772 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.178801 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.178828 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.282167 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.282241 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.282262 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.282293 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.282314 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.385638 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.385732 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.385755 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.385789 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.385815 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.489732 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.489796 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.489813 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.489886 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.489907 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.593413 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.593504 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.593534 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.593571 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.593599 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.697764 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.697895 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.697916 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.697950 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.697971 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.786479 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:19 crc kubenswrapper[4800]: E1125 15:18:19.786693 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.789579 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:19 crc kubenswrapper[4800]: E1125 15:18:19.789761 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.802738 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.802831 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.802885 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.802919 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.802959 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.830369 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:19Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.851197 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:19Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.889193 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7
b151466d6a1160b6931bfa9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:19Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.906736 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.906803 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.906822 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.906881 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.907096 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:19Z","lastTransitionTime":"2025-11-25T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.914900 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:19Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.936284 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:19Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.962050 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:19Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:19 crc kubenswrapper[4800]: I1125 15:18:19.982318 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:19Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc 
kubenswrapper[4800]: I1125 15:18:20.003039 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.010948 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.011012 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.011040 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.011072 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.011098 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.023507 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.048804 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.065878 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.088425 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.104886 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.114302 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.114386 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.114416 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.114453 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.114476 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.127716 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.149326 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.167765 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.187054 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:20Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.218308 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.218379 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.218400 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.218429 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.218456 4800 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.326078 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.326901 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.326944 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.326986 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.327012 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.431181 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.431238 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.431248 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.431270 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.431286 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.534568 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.534624 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.534665 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.534693 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.534707 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.638440 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.638513 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.638537 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.638566 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.638585 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.743676 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.744064 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.744363 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.744587 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.744720 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.786536 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:20 crc kubenswrapper[4800]: E1125 15:18:20.786713 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.786969 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:20 crc kubenswrapper[4800]: E1125 15:18:20.787030 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.787670 4800 scope.go:117] "RemoveContainer" containerID="4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.848789 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.849368 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.849377 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.849397 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.849411 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.952426 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.952490 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.952502 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.952521 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:20 crc kubenswrapper[4800]: I1125 15:18:20.952538 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:20Z","lastTransitionTime":"2025-11-25T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.022772 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/1.log" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.026930 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.027699 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.045618 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.056364 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.056413 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.056425 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.056443 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.056456 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.068587 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.088760 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.110929 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.122576 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.142747 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.156605 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.159335 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.159366 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.159375 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.159392 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.159407 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.170318 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.183825 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.197420 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.208266 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.218276 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.232587 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.251039 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.262321 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.262368 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.262378 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.262396 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.262408 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.268518 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.296042 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.311111 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:21Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.373007 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.373064 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.373075 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.373093 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.373105 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.476985 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.477037 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.477047 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.477065 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.477076 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.580450 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.580506 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.580519 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.580538 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.580566 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.684403 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.684455 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.684470 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.684491 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.684505 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.784771 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.784928 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:21 crc kubenswrapper[4800]: E1125 15:18:21.784992 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:21 crc kubenswrapper[4800]: E1125 15:18:21.785117 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.787202 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.787234 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.787244 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.787263 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.787276 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.890222 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.890280 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.890292 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.890312 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.890325 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.993599 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.993659 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.993700 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.993725 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:21 crc kubenswrapper[4800]: I1125 15:18:21.993744 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:21Z","lastTransitionTime":"2025-11-25T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.096813 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.096887 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.096898 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.096919 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.096936 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.200061 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.200120 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.200133 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.200154 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.200171 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.303801 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.303884 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.303899 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.303920 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.303934 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.408550 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.408627 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.408650 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.408682 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.408705 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.511607 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.511665 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.511682 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.511707 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.511723 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.614398 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.614444 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.614454 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.614470 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.614484 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.718146 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.718221 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.718244 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.718273 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.718293 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.784716 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.784797 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:22 crc kubenswrapper[4800]: E1125 15:18:22.784958 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:22 crc kubenswrapper[4800]: E1125 15:18:22.785081 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.821356 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.821443 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.821454 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.821472 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.821484 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.923766 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.923819 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.923829 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.923868 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:22 crc kubenswrapper[4800]: I1125 15:18:22.923881 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:22Z","lastTransitionTime":"2025-11-25T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.026875 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.026934 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.026948 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.026971 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.026986 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.035570 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/2.log" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.036046 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/1.log" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.039016 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415" exitCode=1 Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.039084 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.039157 4800 scope.go:117] "RemoveContainer" containerID="4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.040281 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415" Nov 25 15:18:23 crc kubenswrapper[4800]: E1125 15:18:23.040590 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.063073 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.077773 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.093431 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.111739 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc 
kubenswrapper[4800]: I1125 15:18:23.129310 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.130237 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.130292 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.130305 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.130329 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.130346 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.143759 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.163407 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.179789 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.191810 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.207024 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-reg
eneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' 
detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.224198 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.234243 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.234281 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.234290 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.234307 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.234320 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.240754 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.254352 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.267703 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.283367 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.296459 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.318768 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be5
2e9aa4abe7cd24bf4adac415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:22Z\\\",\\\"message\\\":\\\"3 for removal\\\\nI1125 15:18:21.995883 6547 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:18:21.995916 6547 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 15:18:21.996202 6547 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:18:21.996221 6547 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:18:21.996234 6547 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 15:18:21.996246 6547 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:18:21.996255 6547 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 15:18:21.996284 6547 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 15:18:21.996348 6547 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 15:18:21.996372 6547 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:18:21.996373 6547 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 15:18:21.996390 6547 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 15:18:21.996401 6547 factory.go:656] Stopping watch factory\\\\nI1125 15:18:21.996438 6547 ovnkube.go:599] Stopped ovnkube\\\\nI1125 15:18:21.996462 6547 
handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:18:21.996494 6547 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 15:18:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"contain
erID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:23Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.337506 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.337578 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.337597 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.337625 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.337646 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.440231 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.440286 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.440298 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.440320 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.440333 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.544197 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.544293 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.544318 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.544351 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.544375 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.647054 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.647163 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.647188 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.647216 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.647233 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.750630 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.750714 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.750740 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.750767 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.750783 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.784374 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:23 crc kubenswrapper[4800]: E1125 15:18:23.784620 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.784988 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:23 crc kubenswrapper[4800]: E1125 15:18:23.785317 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.854416 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.854474 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.854496 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.854520 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.854542 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.958492 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.958584 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.958611 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.958897 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:23 crc kubenswrapper[4800]: I1125 15:18:23.958933 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:23Z","lastTransitionTime":"2025-11-25T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.049589 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/2.log" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.060865 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.060910 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.060923 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.060942 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.060955 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.164683 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.164730 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.164744 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.164784 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.164801 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.269778 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.269874 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.269893 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.269914 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.269945 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.373238 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.373290 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.373301 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.373322 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.373336 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.477937 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.477994 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.478009 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.478031 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.478047 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.582230 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.582334 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.582360 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.582396 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.582425 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.685300 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.685372 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.685388 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.685412 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.685426 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.784744 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.784827 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:24 crc kubenswrapper[4800]: E1125 15:18:24.784938 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:24 crc kubenswrapper[4800]: E1125 15:18:24.785106 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.787952 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.787979 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.787987 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.788002 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.788015 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.891491 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.891576 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.891601 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.891635 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.891662 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.995266 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.995329 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.995341 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.995365 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:24 crc kubenswrapper[4800]: I1125 15:18:24.995383 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:24Z","lastTransitionTime":"2025-11-25T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.099140 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.099208 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.099225 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.099249 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.099265 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.202781 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.202862 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.202880 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.202907 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.202921 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.306247 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.306285 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.306296 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.306314 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.306326 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.409303 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.409378 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.409424 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.409453 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.409477 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.512385 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.512453 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.512465 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.512489 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.512505 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.614738 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.614823 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.614861 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.614884 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.614898 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.717591 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.718110 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.718142 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.718169 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.718185 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.785133 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:25 crc kubenswrapper[4800]: E1125 15:18:25.785789 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.785267 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:25 crc kubenswrapper[4800]: E1125 15:18:25.786097 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.820597 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.820636 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.820649 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.820666 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.820678 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.924058 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.924111 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.924122 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.924140 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:25 crc kubenswrapper[4800]: I1125 15:18:25.924155 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:25Z","lastTransitionTime":"2025-11-25T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.027326 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.027378 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.027396 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.027423 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.027444 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.130396 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.130457 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.130468 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.130489 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.130502 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.233042 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.233105 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.233120 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.233144 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.233159 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.336033 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.336093 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.336103 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.336123 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.336140 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.439424 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.439501 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.439521 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.439553 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.439577 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.542468 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.542528 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.542542 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.542563 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.542577 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.647365 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.647424 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.647437 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.647457 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.647472 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.750112 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.750154 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.750165 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.750179 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.750190 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.784903 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.784988 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:26 crc kubenswrapper[4800]: E1125 15:18:26.785054 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:26 crc kubenswrapper[4800]: E1125 15:18:26.785179 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.853665 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.853738 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.853749 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.853764 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.853779 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.957308 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.957375 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.957392 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.957420 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:26 crc kubenswrapper[4800]: I1125 15:18:26.957438 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:26Z","lastTransitionTime":"2025-11-25T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.062161 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.062270 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.062290 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.062319 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.062377 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.165524 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.165585 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.165598 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.165619 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.165634 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.270464 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.270516 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.270526 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.270547 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.270560 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.379143 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.379211 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.379225 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.379250 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.379267 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.482464 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.482506 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.482520 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.482540 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.482552 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.585607 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.585651 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.585662 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.585682 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.585696 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.688247 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.688310 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.688323 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.688345 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.688360 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.768482 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.768564 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.768583 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.768638 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.768670 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.785578 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.785647 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.785713 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.785922 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.792754 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:27Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.797435 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.797498 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.797513 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.797529 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.797542 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.817364 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:27Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.821704 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.821758 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.821775 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.821801 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.821821 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.842450 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:27Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.846014 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.846039 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
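The recurring failure above is a plain X.509 validity-window violation: the webhook's serving certificate expired 2025-08-24T17:21:41Z while the node clock reads 2025-11-25. A minimal Go sketch of the same check the TLS handshake performs; the certificate path is hypothetical, since the log does not say where the webhook's serving cert lives on disk:

    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"os"
    	"time"
    )

    func main() {
    	// Hypothetical path; locate the actual webhook serving cert first.
    	raw, err := os.ReadFile("/path/to/webhook-serving.crt")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	block, _ := pem.Decode(raw)
    	if block == nil {
    		fmt.Fprintln(os.Stderr, "no PEM block found")
    		os.Exit(1)
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	now := time.Now()
    	// Same comparison the handshake makes: NotBefore <= now <= NotAfter.
    	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
    		fmt.Printf("certificate has expired or is not yet valid: current time %s is after %s\n",
    			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
    		os.Exit(1)
    	}
    	fmt.Println("certificate valid until", cert.NotAfter.UTC())
    }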
event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.846047 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.846061 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.846071 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.863700 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:27Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.868678 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.868716 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
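The body that keeps failing to apply is a strategic-merge patch against the node's status; the "$setElementOrder/conditions" directive pins the ordering of the conditions list, which merges by its "type" key. A reduced Go sketch of that shape, with values copied from the log; the real patch also carries allocatable, capacity, images and nodeInfo, omitted here for brevity:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	patch := map[string]any{
    		"status": map[string]any{
    			// Ordering hint for the merged-by-key conditions list.
    			"$setElementOrder/conditions": []map[string]string{
    				{"type": "MemoryPressure"}, {"type": "DiskPressure"},
    				{"type": "PIDPressure"}, {"type": "Ready"},
    			},
    			// Only the Ready condition shown; values from the log above.
    			"conditions": []map[string]string{{
    				"type":               "Ready",
    				"status":             "False",
    				"reason":             "KubeletNotReady",
    				"message":            "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?",
    				"lastHeartbeatTime":  "2025-11-25T15:18:27Z",
    				"lastTransitionTime": "2025-11-25T15:18:27Z",
    			}},
    		},
    	}
    	out, _ := json.MarshalIndent(patch, "", "  ")
    	fmt.Println(string(out))
    }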
event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.868729 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.868745 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.868756 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.895964 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:27Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:27 crc kubenswrapper[4800]: E1125 15:18:27.896241 4800 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.904799 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
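"update node status exceeds retry count" marks the end of a bounded retry loop: the kubelet attempts the status update a fixed number of times per sync and then gives up until the next cycle. A minimal sketch, assuming the kubelet's default of 5 attempts; tryUpdateNodeStatus here is a hypothetical stand-in for the patch call that the webhook keeps rejecting:

    package main

    import (
    	"errors"
    	"fmt"
    )

    const nodeStatusUpdateRetry = 5 // assumed kubelet default

    func tryUpdateNodeStatus(attempt int) error {
    	// Stand-in: every attempt fails the way the webhook call fails above.
    	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": certificate has expired`)
    }

    func updateNodeStatus() error {
    	for i := 0; i < nodeStatusUpdateRetry; i++ {
    		if err := tryUpdateNodeStatus(i); err != nil {
    			fmt.Printf("Error updating node status, will retry: %v\n", err)
    			continue
    		}
    		return nil
    	}
    	return fmt.Errorf("update node status exceeds retry count")
    }

    func main() {
    	if err := updateNodeStatus(); err != nil {
    		fmt.Println(err)
    	}
    }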
event="NodeHasSufficientMemory" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.904893 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.904914 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.904941 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:27 crc kubenswrapper[4800]: I1125 15:18:27.904960 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:27Z","lastTransitionTime":"2025-11-25T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.008138 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.008189 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.008200 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.008220 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.008234 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:28Z","lastTransitionTime":"2025-11-25T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.110969 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.111006 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.111014 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.111030 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.111040 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:28Z","lastTransitionTime":"2025-11-25T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.214325 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.214379 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.214392 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.214412 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.214428 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:28Z","lastTransitionTime":"2025-11-25T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.316918 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.316963 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.316973 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.316991 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.317005 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:28Z","lastTransitionTime":"2025-11-25T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.339688 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:28 crc kubenswrapper[4800]: E1125 15:18:28.339976 4800 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:18:28 crc kubenswrapper[4800]: E1125 15:18:28.341108 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs podName:3e0ee245-1a7f-4428-bbd9-50de79d2cbd8 nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.340080036 +0000 UTC m=+101.394488618 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs") pod "network-metrics-daemon-fjqzf" (UID: "3e0ee245-1a7f-4428-bbd9-50de79d2cbd8") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.419908 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.419957 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.419967 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.419986 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.419997 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:28Z","lastTransitionTime":"2025-11-25T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.525545 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.525604 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.525617 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.525645 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.525658 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:28Z","lastTransitionTime":"2025-11-25T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
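The "durationBeforeRetry 32s" in the mount failure above comes from per-operation exponential backoff: each consecutive failure roughly doubles the wait before the next mount attempt, up to a cap. A sketch under assumed defaults (500 ms initial delay, factor 2, cap around two minutes); under those assumptions 32 s corresponds to the seventh consecutive failure:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Assumed defaults for the volume operation backoff; not read from the log.
    const (
    	initialDurationBeforeRetry = 500 * time.Millisecond
    	durationBeforeRetryFactor  = 2
    	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second
    )

    // durationBeforeRetry returns the wait after the Nth consecutive failure.
    func durationBeforeRetry(failures int) time.Duration {
    	d := initialDurationBeforeRetry
    	for i := 1; i < failures; i++ {
    		d *= durationBeforeRetryFactor
    		if d > maxDurationBeforeRetry {
    			return maxDurationBeforeRetry
    		}
    	}
    	return d
    }

    func main() {
    	for f := 1; f <= 9; f++ {
    		fmt.Printf("failure %d -> wait %v\n", f, durationBeforeRetry(f))
    	}
    	// failure 7 -> wait 32s, matching the log line above.
    }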
[the same five-entry "Recording event message" / "Node became not ready" sequence repeats at 15:18:28.419, 15:18:28.525, 15:18:28.628 and 15:18:28.731; omitted]
Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.785612 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:28 crc kubenswrapper[4800]: I1125 15:18:28.785626 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:28 crc kubenswrapper[4800]: E1125 15:18:28.785896 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:28 crc kubenswrapper[4800]: E1125 15:18:28.786040 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[the same five-entry sequence repeats at 15:18:28.834 and 15:18:28.937; omitted]
[the same five-entry "Recording event message" / "Node became not ready" sequence repeats at 15:18:29.042, 15:18:29.145, 15:18:29.248, 15:18:29.351, 15:18:29.455, 15:18:29.558, 15:18:29.661 and 15:18:29.764; omitted]
Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.784329 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.784416 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 15:18:29 crc kubenswrapper[4800]: E1125 15:18:29.784453 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:29 crc kubenswrapper[4800]: E1125 15:18:29.784593 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.802587 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faa
f92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.816221 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.829213 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.845580 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.861798 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.867283 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.867317 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.867329 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.867346 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.867357 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:29Z","lastTransitionTime":"2025-11-25T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.883181 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.897442 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.910480 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.921214 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.933795 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.947057 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.957810 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.971275 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.971317 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.971327 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.971344 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.971354 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:29Z","lastTransitionTime":"2025-11-25T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.975904 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e034c909fe94631016cad3718531405b67707b7b151466d6a1160b6931bfa9c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:04Z\\\",\\\"message\\\":\\\"lector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1125 15:18:04.447101 6296 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447534 6296 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 15:18:04.447750 6296 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448103 6296 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448804 6296 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.448985 6296 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449340 6296 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 15:18:04.449925 6296 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:22Z\\\",\\\"message\\\":\\\"3 for removal\\\\nI1125 15:18:21.995883 6547 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:18:21.995916 6547 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 15:18:21.996202 6547 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:18:21.996221 6547 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:18:21.996234 6547 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 15:18:21.996246 6547 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:18:21.996255 6547 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 15:18:21.996284 6547 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 15:18:21.996348 6547 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 15:18:21.996372 6547 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:18:21.996373 6547 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 15:18:21.996390 6547 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 15:18:21.996401 6547 factory.go:656] Stopping watch factory\\\\nI1125 15:18:21.996438 6547 ovnkube.go:599] Stopped ovnkube\\\\nI1125 15:18:21.996462 6547 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:18:21.996494 6547 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 
15:18:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:29 crc kubenswrapper[4800]: I1125 15:18:29.991308 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:29Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.007816 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:30Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.022728 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:30Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.048436 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:30Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:30 crc 
kubenswrapper[4800]: I1125 15:18:30.074024 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.074082 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.074096 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.074116 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.074129 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.176496 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.176545 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.176555 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.176575 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.176587 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.279527 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.279582 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.279595 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.279617 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.279630 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.382466 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.382535 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.382552 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.382578 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.382601 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.485406 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.485476 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.485494 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.485523 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.485548 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.590671 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.590737 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.590749 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.590766 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.590777 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.693516 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.693577 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.693595 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.693625 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.693641 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.785176 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.785289 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:30 crc kubenswrapper[4800]: E1125 15:18:30.785306 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:30 crc kubenswrapper[4800]: E1125 15:18:30.785481 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.796822 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.796914 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.796944 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.796975 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.796992 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.899531 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.899586 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.899600 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.899620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:30 crc kubenswrapper[4800]: I1125 15:18:30.899634 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:30Z","lastTransitionTime":"2025-11-25T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.001509 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.001548 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.001558 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.001573 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.001582 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.104959 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.105035 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.105052 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.105077 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.105095 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.208225 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.208273 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.208284 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.208304 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.208317 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.311358 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.311423 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.311442 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.311468 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.311485 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.414705 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.415125 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.415135 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.415156 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.415171 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.518097 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.518148 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.518163 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.518187 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.518198 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.621878 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.621937 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.621949 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.621968 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.621980 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.727255 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.727333 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.727346 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.727370 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.727385 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.784911 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.785005 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:31 crc kubenswrapper[4800]: E1125 15:18:31.785154 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:31 crc kubenswrapper[4800]: E1125 15:18:31.785249 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.830037 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.830078 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.830090 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.830106 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.830117 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.932986 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.933106 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.933130 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.933168 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:31 crc kubenswrapper[4800]: I1125 15:18:31.933195 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:31Z","lastTransitionTime":"2025-11-25T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.043480 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.043528 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.043554 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.043578 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.043594 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.146351 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.146410 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.146420 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.146440 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.146452 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.249543 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.249584 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.249596 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.249613 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.249623 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.351887 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.351938 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.351947 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.351964 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.351975 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.454549 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.454588 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.454599 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.454619 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.454629 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.558173 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.558229 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.558238 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.558259 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.558271 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
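The condition={...} payload in each "Node became not ready" entry is the node's Ready condition; across repetitions only the two timestamps move, while reason and message stay constant. A small sketch that unmarshals one such payload with Go's encoding/json; the struct mirrors the field names visible in the log and is illustrative, not the k8s.io/api type, and the message string is abbreviated here.

package main

import (
	"encoding/json"
	"fmt"
)

// readyCondition mirrors the condition={...} JSON logged by setters.go above.
type readyCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied from an entry above; message shortened for readability.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c readyCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s reason=%s (heartbeat %s)\n", c.Type, c.Status, c.Reason, c.LastHeartbeatTime)
}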
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.661274 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.661323 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.661333 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.661351 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.661362 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.763904 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.763941 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.763951 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.763967 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.763979 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.784687 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:32 crc kubenswrapper[4800]: E1125 15:18:32.784805 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.784974 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:32 crc kubenswrapper[4800]: E1125 15:18:32.785023 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.867120 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.867174 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.867187 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.867207 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.867222 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.969910 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.969958 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.969969 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.969985 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:32 crc kubenswrapper[4800]: I1125 15:18:32.969994 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:32Z","lastTransitionTime":"2025-11-25T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.073178 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.073228 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.073240 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.073259 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.073272 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.176446 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.176481 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.176490 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.176506 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.176516 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.279402 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.279449 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.279461 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.279480 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.279492 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.384202 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.384252 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.384272 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.384296 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.384315 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.487134 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.487183 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.487193 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.487213 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.487226 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.589738 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.589777 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.589788 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.589806 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.589817 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.693055 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.693118 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.693139 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.693169 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.693190 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.785055 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:33 crc kubenswrapper[4800]: E1125 15:18:33.785259 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.785557 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 15:18:33 crc kubenswrapper[4800]: E1125 15:18:33.785684 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.795184 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.795213 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.795222 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.795233 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.795243 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.900095 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.900164 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.900184 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.900209 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:33 crc kubenswrapper[4800]: I1125 15:18:33.900231 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:33Z","lastTransitionTime":"2025-11-25T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.003581 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.003650 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.003665 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.003686 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.003703 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.106610 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.106650 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.106659 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.106678 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.106688 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.209549 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.209634 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.209650 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.209677 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.209697 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.313028 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.313090 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.313106 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.313127 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.313140 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.416516 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.416583 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.416601 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.416628 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.416646 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.520070 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.520138 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.520155 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.520182 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.520200 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
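The status_manager failures recorded below all share one cause: the pod.network-node-identity.openshift.io webhook's serving certificate expired at 2025-08-24T17:21:41Z, while the node clock reads 2025-11-25. The check that fails is ordinary x509 validity verification; a minimal Go sketch under those assumptions follows (the PEM path is hypothetical, since the log does not say where the webhook cert lives).

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; substitute the webhook's actual serving cert.
	data, err := os.ReadFile("/tmp/webhook-cert.pem")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	if now.After(cert.NotAfter) {
		// Matches the log: "certificate has expired ... current time
		// 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z".
		fmt.Printf("certificate expired: now %s is after NotAfter %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate is within its validity window")
}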
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.623574 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.623651 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.623670 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.623702 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.623725 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.726913 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.726964 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.726975 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.726994 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.727006 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.784407 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:34 crc kubenswrapper[4800]: E1125 15:18:34.784599 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.784667 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:34 crc kubenswrapper[4800]: E1125 15:18:34.784728 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.785297 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"
Nov 25 15:18:34 crc kubenswrapper[4800]: E1125 15:18:34.785527 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.799475 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\
\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.816182 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.830098 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.830158 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.830175 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.830200 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.830217 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.832186 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z"
Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.849497 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.867781 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.886785 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.904458 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.926439 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.932886 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.932937 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.932945 4800 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.932962 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.932973 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:34Z","lastTransitionTime":"2025-11-25T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.944821 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.955919 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.970192 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:34 crc kubenswrapper[4800]: I1125 15:18:34.983740 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:34Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.009392 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be5
2e9aa4abe7cd24bf4adac415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:22Z\\\",\\\"message\\\":\\\"3 for removal\\\\nI1125 15:18:21.995883 6547 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:18:21.995916 6547 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 15:18:21.996202 6547 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:18:21.996221 6547 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:18:21.996234 6547 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 15:18:21.996246 6547 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:18:21.996255 6547 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 15:18:21.996284 6547 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 15:18:21.996348 6547 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 15:18:21.996372 6547 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:18:21.996373 6547 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 15:18:21.996390 6547 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 15:18:21.996401 6547 factory.go:656] Stopping watch factory\\\\nI1125 15:18:21.996438 6547 ovnkube.go:599] Stopped ovnkube\\\\nI1125 15:18:21.996462 6547 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:18:21.996494 6547 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 15:18:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:35Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.028253 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:35Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.035184 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.035219 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.035232 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.035278 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.035293 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.043087 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:35Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.060445 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-
daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:35Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.075516 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:35Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.138754 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.138807 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.138824 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.138866 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.138882 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.242282 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.242356 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.242380 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.242410 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.242433 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.345832 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.345945 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.345969 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.346006 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.346027 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.449353 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.449406 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.449418 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.449441 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.449453 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.553000 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.553065 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.553082 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.553112 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.553134 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.656941 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.657004 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.657025 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.657063 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.657081 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.759934 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.760018 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.760031 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.760051 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.760063 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.785208 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.785428 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:35 crc kubenswrapper[4800]: E1125 15:18:35.785559 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:35 crc kubenswrapper[4800]: E1125 15:18:35.785713 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.863580 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.863719 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.863737 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.863763 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.863781 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.966507 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.966607 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.966626 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.966652 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:35 crc kubenswrapper[4800]: I1125 15:18:35.966667 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:35Z","lastTransitionTime":"2025-11-25T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.069993 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.070088 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.070112 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.070147 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.070179 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.174172 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.174255 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.174274 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.174303 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.174322 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.278053 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.278132 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.278152 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.278181 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.278201 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.381492 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.381575 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.381592 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.381620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.381638 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.484536 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.484601 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.484623 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.484649 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.484668 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.589007 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.589117 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.589131 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.589150 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.589162 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.693070 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.693133 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.693151 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.693179 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.693196 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.785032 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:36 crc kubenswrapper[4800]: E1125 15:18:36.785165 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.785335 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:36 crc kubenswrapper[4800]: E1125 15:18:36.785381 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.796185 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.796206 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.796214 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.796225 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.796233 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.800948 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.899370 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.899427 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.899443 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.899467 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:36 crc kubenswrapper[4800]: I1125 15:18:36.899485 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:36Z","lastTransitionTime":"2025-11-25T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.003271 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.003331 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.003348 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.003375 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.003392 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.105681 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.105759 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.105781 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.105810 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.105871 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.209970 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.210036 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.210053 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.210083 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.210102 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.313812 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.313910 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.313924 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.313946 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.313959 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.416513 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.416614 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.416627 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.416648 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.416661 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.519720 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.519779 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.519789 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.519806 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.519816 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.622598 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.622654 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.622664 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.622680 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.622690 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.725363 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.725506 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.725534 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.725567 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.725589 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.785366 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:37 crc kubenswrapper[4800]: E1125 15:18:37.785503 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.785634 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:37 crc kubenswrapper[4800]: E1125 15:18:37.785934 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.829202 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.829245 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.829257 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.829277 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.829286 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.932668 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.932717 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.932736 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.932759 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:37 crc kubenswrapper[4800]: I1125 15:18:37.932773 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:37Z","lastTransitionTime":"2025-11-25T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.035149 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.035187 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.035272 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.035289 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.035303 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.108762 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-nzxgf_0321f61a-9e40-47a2-b19f-a859fd6b890a/kube-multus/0.log" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.108822 4800 generic.go:334] "Generic (PLEG): container finished" podID="0321f61a-9e40-47a2-b19f-a859fd6b890a" containerID="7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5" exitCode=1 Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.108923 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-nzxgf" event={"ID":"0321f61a-9e40-47a2-b19f-a859fd6b890a","Type":"ContainerDied","Data":"7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.109586 4800 scope.go:117] "RemoveContainer" containerID="7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.132869 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.141110 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.141147 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.141156 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.141173 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.141185 4800 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.152190 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.170033 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.186883 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.202925 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.217836 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d7c26ae-fde9-4407-b1ce-1eb82c4b3d3e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://470628f891898766fdab61961dd1b441cb35ae97f941cf532d8dbcdbd725a25c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3c0640ad822968f306f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3c0640ad822968f306f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.234542 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.244318 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.244365 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.244380 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.244403 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.244417 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.246367 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.267565 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:22Z\\\",\\\"message\\\":\\\"3 for removal\\\\nI1125 15:18:21.995883 6547 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:18:21.995916 6547 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 15:18:21.996202 6547 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:18:21.996221 6547 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:18:21.996234 6547 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 15:18:21.996246 6547 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:18:21.996255 6547 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 15:18:21.996284 6547 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 15:18:21.996348 6547 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 15:18:21.996372 6547 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:18:21.996373 6547 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 15:18:21.996390 6547 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 15:18:21.996401 6547 factory.go:656] Stopping watch factory\\\\nI1125 15:18:21.996438 6547 ovnkube.go:599] Stopped ovnkube\\\\nI1125 15:18:21.996462 6547 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:18:21.996494 6547 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 
15:18:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.280272 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.290320 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.293747 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.293802 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.293813 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.293868 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.293880 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.305678 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.307748 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:37Z\\\",\\\"message\\\":\\\"2025-11-25T15:17:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b2b9cc6a-9fa4-477e-a685-cff0549441a0\\\\n2025-11-25T15:17:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b2b9cc6a-9fa4-477e-a685-cff0549441a0 to /host/opt/cni/bin/\\\\n2025-11-25T15:17:52Z [verbose] multus-daemon started\\\\n2025-11-25T15:17:52Z [verbose] Readiness Indicator file check\\\\n2025-11-25T15:18:37Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.311237 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.311271 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.311280 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.311292 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.311301 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.318708 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.329433 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.333534 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.333572 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.333583 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.333597 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.333608 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.335976 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.349687 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.351718 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.356397 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.356431 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.356440 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.356485 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.356498 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.367958 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.368462 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.372382 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.372533 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.372613 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.372699 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.372790 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.387002 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b
6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.389461 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"47449ece-b8c7-4e5f-9f20-f4807c2b7cf6\\\",\\\"systemUUID\\\":\\\"f3b20845-f0f6-45af-84f7-77c49c8161e9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.389624 4800 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.391987 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.392066 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.392080 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.392121 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.392133 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.401924 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2a
f0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:38Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.495936 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.496005 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.496022 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.496050 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.496067 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.598449 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.598817 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.598861 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.598891 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.598908 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.702171 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.702209 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.702217 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.702232 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.702240 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.784621 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.784727 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.784806 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:38 crc kubenswrapper[4800]: E1125 15:18:38.785102 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.805503 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.805543 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.805558 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.805579 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.805594 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.909463 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.909544 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.909566 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.909597 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:38 crc kubenswrapper[4800]: I1125 15:18:38.909621 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:38Z","lastTransitionTime":"2025-11-25T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.012517 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.012600 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.012620 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.012653 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.012683 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.115168 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.115219 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.115233 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.115288 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.115304 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.116184 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-nzxgf_0321f61a-9e40-47a2-b19f-a859fd6b890a/kube-multus/0.log" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.116299 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-nzxgf" event={"ID":"0321f61a-9e40-47a2-b19f-a859fd6b890a","Type":"ContainerStarted","Data":"f223a9adeb4f3035c5439f79a2c0e65bd4024420e203214517db732b46e41290"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.134080 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.155184 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.169271 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.187364 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.203210 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.214151 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.218241 4800 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.218288 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.218350 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.218375 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.218387 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.242284 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be5
2e9aa4abe7cd24bf4adac415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:22Z\\\",\\\"message\\\":\\\"3 for removal\\\\nI1125 15:18:21.995883 6547 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:18:21.995916 6547 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 15:18:21.996202 6547 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:18:21.996221 6547 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:18:21.996234 6547 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 15:18:21.996246 6547 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:18:21.996255 6547 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 15:18:21.996284 6547 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 15:18:21.996348 6547 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 15:18:21.996372 6547 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:18:21.996373 6547 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 15:18:21.996390 6547 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 15:18:21.996401 6547 factory.go:656] Stopping watch factory\\\\nI1125 15:18:21.996438 6547 ovnkube.go:599] Stopped ovnkube\\\\nI1125 15:18:21.996462 6547 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:18:21.996494 6547 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 15:18:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.253561 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d7c26ae-fde9-4407-b1ce-1eb82c4b3d3e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://470628f891898766fdab61961dd1b441cb35ae97f941cf532d8dbcdbd725a25c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3
c0640ad822968f306f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3c0640ad822968f306f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.268642 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.277825 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.289240 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f223a9adeb4f3035c5439f79a2c0e65bd4024420e203214517db732b46e41290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:37Z\\\",\\\"message\\\":\\\"2025-11-25T15:17:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b2b9cc6a-9fa4-477e-a685-cff0549441a0\\\\n2025-11-25T15:17:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b2b9cc6a-9fa4-477e-a685-cff0549441a0 to /host/opt/cni/bin/\\\\n2025-11-25T15:17:52Z [verbose] multus-daemon started\\\\n2025-11-25T15:17:52Z [verbose] Readiness Indicator file check\\\\n2025-11-25T15:18:37Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.297704 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.308073 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.320720 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.320756 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.320768 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.320789 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.320799 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.323089 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.337458 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.349808 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 
15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.364908 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.381987 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.423543 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.423815 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.423924 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.423997 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.424058 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.527023 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.528051 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.528099 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.528124 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.528140 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.631509 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.631563 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.631576 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.631600 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.631613 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.734704 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.734789 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.734812 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.734878 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.734916 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.785007 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.785057 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:39 crc kubenswrapper[4800]: E1125 15:18:39.785209 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:39 crc kubenswrapper[4800]: E1125 15:18:39.785474 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.805609 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://98d8401de938131ea1989d1eb7959c4f02534d6b5b0a49233b6d4de20976d241\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.823117 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6tshx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33cfcf36-9b68-42c7-bc9c-261a04435b92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6cc803696797c87186cb10e3290a9354356c376ad36cc8cbbfa01f1c51498bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7m9hk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6tshx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.837467 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.837500 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.837534 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.837553 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.837562 4800 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.844115 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"511c09cb-7edd-4195-bc55-233f51435125\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e635
5e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 15:17:40.726449 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 15:17:40.726771 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 15:17:40.727753 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2950602164/tls.crt::/tmp/serving-cert-2950602164/tls.key\\\\\\\"\\\\nI1125 15:17:41.263740 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 15:17:41.273886 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 15:17:41.273922 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 15:17:41.273950 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 15:17:41.273963 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 15:17:41.284009 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 15:17:41.284044 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284051 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 15:17:41.284059 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 15:17:41.284064 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 15:17:41.284068 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 15:17:41.284074 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 15:17:41.284293 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 15:17:41.288300 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.863414 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.884026 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d53d17b3c535df88d854eec14688d5b27d036dae5ef458ff3344bee44b38a9ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28995b6be82b265db19cb1d040b339d9525d68eba00f3f842b8ff4732fb3d1ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.914148 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80e4f44d-4647-4e15-a29f-2672fc065d82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:22Z\\\",\\\"message\\\":\\\"3 for removal\\\\nI1125 15:18:21.995883 6547 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 15:18:21.995916 6547 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 15:18:21.996202 6547 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 15:18:21.996221 6547 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 15:18:21.996234 6547 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 15:18:21.996246 6547 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 15:18:21.996255 6547 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 15:18:21.996284 6547 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 15:18:21.996348 6547 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 15:18:21.996372 6547 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 15:18:21.996373 6547 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 15:18:21.996390 6547 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 15:18:21.996401 6547 factory.go:656] Stopping watch factory\\\\nI1125 15:18:21.996438 6547 ovnkube.go:599] Stopped ovnkube\\\\nI1125 15:18:21.996462 6547 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 15:18:21.996494 6547 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 15:18:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:18:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvthw_openshift-ovn-kubernetes(80e4f44d-4647-4e15-a29f-2672fc065d82)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4jbt5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvthw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.931743 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d7c26ae-fde9-4407-b1ce-1eb82c4b3d3e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://470628f891898766fdab61961dd1b441cb35ae97f941cf532d8dbcdbd725a25c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3
c0640ad822968f306f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89ce4a3f77be8bba1ad0d0416bcf90f9d72c2b181098e3c0640ad822968f306f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.939275 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.939307 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.939318 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.939337 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.939346 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:39Z","lastTransitionTime":"2025-11-25T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.953291 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.970687 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a80af7a-a7d6-4433-97da-7d5d015cd401\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e96821526874df3bbe1d4a3fc9f9c40758663bacdf07999d8d1148e325caa07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5465\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hvg6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:39 crc kubenswrapper[4800]: I1125 15:18:39.993384 4800 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-nzxgf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0321f61a-9e40-47a2-b19f-a859fd6b890a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f223a9adeb4f3035c5439f79a2c0e65bd4024420e203214517db732b46e41290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T15:18:37Z\\\",\\\"message\\\":\\\"2025-11-25T15:17:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b2b9cc6a-9fa4-477e-a685-cff0549441a0\\\\n2025-11-25T15:17:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b2b9cc6a-9fa4-477e-a685-cff0549441a0 to /host/opt/cni/bin/\\\\n2025-11-25T15:17:52Z [verbose] multus-daemon started\\\\n2025-11-25T15:17:52Z [verbose] Readiness Indicator file check\\\\n2025-11-25T15:18:37Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-24wm9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-nzxgf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:39Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.008333 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8g524\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fjqzf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.029199 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50f4459256fae4f0bd5e7bfca6500c63992daf551ac2a660ca5ef15f85cbf14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.041868 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-6qf5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6935887-df54-43b1-a2ad-0cfb3c9d65dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdec073e6da7f9d078ed3fb77021f5ff53a7a0ffa8c644473dd7f4702b8f083b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-62d7m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-6qf5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.042683 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.042944 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.043168 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.043452 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.044009 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.058921 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-84zhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69669849-59a1-47d8-9583-4ed964926242\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1bb3401d85904c4c5ff1f0ba589d74644da5082ff9655bbeedf10d0d1015584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7e11dfc76c910893253087d37b5ac91a0a2ac1d70218e18e8f8c3c37438568a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fc31b29d9f2c8f2cf31df94e0320772a48b1ff4d4413be3fb9dcff734f6126c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea9fcf8c25ab27395a8f28a27ff0a5834c1d6e61d68b758f7dbe7fc3c841426\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23acab51077a690e90931e17ee217a9b36b6449b4677c20cb4d58f3becb2540e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"starte
dAt\\\":\\\"2025-11-25T15:17:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8be3ac7e6a28b705ef99b79348548005d8c349ec8ef39193995f486fbc66e02d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50b56825bb38ccbf414cebbd0399e994239754bb918329870d546c214594a1fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2v95d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-84zhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 
2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.075872 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190f9d43-ec5b-488c-92fb-d522b746a16d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4d6288b54f6a7e988afd9267bcc7122101a8a9090fb0021a0e7929a175ce34a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7b982a8dffff273c5c9fc4dfbf4b1ec871a7be4d57b609e1ad5abe0d363500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twfcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-895nk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.093438 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a7a4245-ffb2-4023-9e02-fe7efea8b547\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29320fe71fde3b94e79746145c8c3817055c504a24b29ed9b645b9186b762d01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://389f9c3d7c6db1df0846d3496f61c4c55bab3c8917fb21da6be299b907d218c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5d55a40a5a2a943ecefa5a4779245cf4e678a6daf5b01c7751679f7c12b02b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.106601 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"51d32445-e5a5-4ec4-a738-6ddfd5aa494f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:18:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8784e959be8194e732572ea918ad9c2b97bd26e2cee3213b20d968cc3688aed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ff2f8c2e88b40d0f60aa08ddf34a804883aecc946cfef5dc366108603b49d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48be4d0f7dd1ac847125ec8b42ea938331c5cb0ab2f86081de6abcd43cd08d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec3fe8613028e6ea82baf2a83456249dcfc1d2f8d809d9a357efc9f78d614703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T15:17:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T15:17:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T15:17:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.123437 4800 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T15:17:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T15:18:40Z is after 2025-08-24T17:21:41Z" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.147344 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.147682 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.147747 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.147954 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.148040 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.251043 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.251091 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.251102 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.251120 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.251133 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.354181 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.354228 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.354237 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.354256 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.354265 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.457391 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.457433 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.457445 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.457461 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.457471 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.560718 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.560768 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.560779 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.560798 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.560810 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.663786 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.663836 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.663884 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.663924 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.663945 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.767036 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.767120 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.767146 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.767180 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.767208 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.785262 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.785316 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:40 crc kubenswrapper[4800]: E1125 15:18:40.785378 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:40 crc kubenswrapper[4800]: E1125 15:18:40.785526 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.871052 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.871134 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.871154 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.871182 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.871202 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.974741 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.974799 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.974811 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.974833 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:40 crc kubenswrapper[4800]: I1125 15:18:40.974864 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:40Z","lastTransitionTime":"2025-11-25T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.078308 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.078366 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.078384 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.078410 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.078428 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.181222 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.181280 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.181322 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.181355 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.181375 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.284894 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.284959 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.284978 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.285007 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.285027 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.394942 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.395011 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.395020 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.395039 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.395050 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.497827 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.497920 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.497932 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.497953 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.497971 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.601412 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.601500 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.601527 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.601569 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.601597 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.704927 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.704984 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.704995 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.705015 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.705026 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.784792 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.784934 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:41 crc kubenswrapper[4800]: E1125 15:18:41.785084 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:41 crc kubenswrapper[4800]: E1125 15:18:41.785181 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.807993 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.808044 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.808059 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.808080 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.808094 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.911590 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.911688 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.911714 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.911751 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:41 crc kubenswrapper[4800]: I1125 15:18:41.911784 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:41Z","lastTransitionTime":"2025-11-25T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.016575 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.016628 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.016640 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.016660 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.016671 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.119734 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.119789 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.119804 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.119826 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.119864 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.223294 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.223335 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.223345 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.223362 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.223371 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.326117 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.326160 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.326171 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.326191 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.326203 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.429063 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.429124 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.429139 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.429159 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.429171 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.531806 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.531872 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.531889 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.531912 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.531929 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.641202 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.641250 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.641259 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.641275 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.641285 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.743450 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.743494 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.743507 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.743527 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.743542 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.784985 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.785016 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:42 crc kubenswrapper[4800]: E1125 15:18:42.785142 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:42 crc kubenswrapper[4800]: E1125 15:18:42.785266 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.846149 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.846206 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.846222 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.846243 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.846258 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.949371 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.949438 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.949455 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.949481 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:42 crc kubenswrapper[4800]: I1125 15:18:42.949498 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:42Z","lastTransitionTime":"2025-11-25T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.052189 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.052251 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.052268 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.052295 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.052313 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.154295 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.154332 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.154341 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.154357 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.154366 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.257365 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.257434 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.257454 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.257494 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.257529 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.360011 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.360056 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.360066 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.360084 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.360096 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.463365 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.463423 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.463439 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.463462 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.463474 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.566638 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.566715 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.566740 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.566767 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.566784 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.670391 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.670458 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.670479 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.670509 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.670530 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.774246 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.774323 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.774345 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.774369 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.774386 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.785100 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.785137 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:43 crc kubenswrapper[4800]: E1125 15:18:43.785313 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:43 crc kubenswrapper[4800]: E1125 15:18:43.785463 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.878006 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.878066 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.878077 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.878095 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.878104 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.981763 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.981818 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.981839 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.981905 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:43 crc kubenswrapper[4800]: I1125 15:18:43.981927 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:43Z","lastTransitionTime":"2025-11-25T15:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.084955 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.085068 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.085088 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.085118 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.085155 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.188312 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.188429 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.188454 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.188489 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.188512 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.291699 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.291951 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.291965 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.291985 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.292003 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.395373 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.395416 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.395435 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.395455 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.395466 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.498249 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.498328 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.498346 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.498372 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.498389 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.542927 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.543083 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543111 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 15:19:48.543081875 +0000 UTC m=+149.597490357 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.543170 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543237 4800 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.543256 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543304 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:19:48.54328659 +0000 UTC m=+149.597695082 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.543330 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543355 4800 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543426 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543449 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543459 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 15:19:48.543431414 +0000 UTC m=+149.597839906 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543464 4800 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543510 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 15:19:48.543502065 +0000 UTC m=+149.597910557 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543428 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543539 4800 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543554 4800 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.543603 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 15:19:48.543592198 +0000 UTC m=+149.598000730 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.601512 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.601549 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.601559 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.601576 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.601587 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.704149 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.704201 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.704215 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.704239 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.704255 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.784870 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.784915 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.785063 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:44 crc kubenswrapper[4800]: E1125 15:18:44.785213 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.807039 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.807078 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.807090 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.807106 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.807115 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.910587 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.910643 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.910659 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.910679 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 15:18:44 crc kubenswrapper[4800]: I1125 15:18:44.910694 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:44Z","lastTransitionTime":"2025-11-25T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.014420 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.014507 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.014531 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.014565 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.014590 4800 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T15:18:45Z","lastTransitionTime":"2025-11-25T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... 15:18:45.119 – 15:18:45.740: the same five-entry node-status cycle (four "Recording event message for node" entries plus the identical "Node became not ready" condition) repeats at ~100 ms intervals ...]
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.785123 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.785215 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf"
Nov 25 15:18:45 crc kubenswrapper[4800]: E1125 15:18:45.785307 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 15:18:45 crc kubenswrapper[4800]: E1125 15:18:45.785533 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8"
Nov 25 15:18:45 crc kubenswrapper[4800]: I1125 15:18:45.800811 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
[... 15:18:45.842 – 15:18:46.768: the node-status cycle continues at ~100 ms intervals, the condition timestamps advancing to 2025-11-25T15:18:46Z ...]
Nov 25 15:18:46 crc kubenswrapper[4800]: I1125 15:18:46.784901 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:46 crc kubenswrapper[4800]: E1125 15:18:46.785041 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:46 crc kubenswrapper[4800]: I1125 15:18:46.785116 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:46 crc kubenswrapper[4800]: E1125 15:18:46.785409 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[... 15:18:46.871 – 15:18:47.703: the node-status cycle continues at ~100 ms intervals, the condition timestamps advancing to 2025-11-25T15:18:47Z ...]
[... 15:18:47.784 – 15:18:47.785: the same "No sandbox for pod can be found" / "Error syncing pod, skipping" pairs recur for openshift-multus/network-metrics-daemon-fjqzf and openshift-network-diagnostics/network-check-source-55646444c4-trplf ...]
[... 15:18:47.807 – 15:18:47.912: the node-status cycle repeats twice more ...]
[... 15:18:48.015 – 15:18:48.590: the node-status cycle continues at ~100 ms intervals, the condition timestamps advancing to 2025-11-25T15:18:48Z ...]
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.656709 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6"]
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.657151 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.659377 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.660018 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.661003 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.661477 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.689977 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ab7c611-b524-44b1-b8ec-7e14630fdd89-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qdrr6\" (UID: \"2ab7c611-b524-44b1-b8ec-7e14630fdd89\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6"
[... 15:18:48.690091 – 15:18:48.690287: matching VerifyControllerAttachedVolume entries for the "service-ca", "serving-cert", "etc-cvo-updatepayloads", and "etc-ssl-certs" volumes of the same pod ...]
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.690874 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=3.690802333 podStartE2EDuration="3.690802333s" podCreationTimestamp="2025-11-25 15:18:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.689649255 +0000 UTC m=+89.744057747" watchObservedRunningTime="2025-11-25 15:18:48.690802333 +0000 UTC m=+89.745210855"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.744330 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-6qf5g" podStartSLOduration=66.744300606 podStartE2EDuration="1m6.744300606s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.728233455 +0000 UTC m=+89.782641967" watchObservedRunningTime="2025-11-25 15:18:48.744300606 +0000 UTC m=+89.798709098"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.744768 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-nzxgf" podStartSLOduration=66.744759358 podStartE2EDuration="1m6.744759358s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.744221925 +0000 UTC m=+89.798630407" watchObservedRunningTime="2025-11-25 15:18:48.744759358 +0000 UTC m=+89.799167850"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.785445 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.785464 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 15:18:48 crc kubenswrapper[4800]: E1125 15:18:48.785974 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.787530 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"
Nov 25 15:18:48 crc kubenswrapper[4800]: E1125 15:18:48.787539 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.789129 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=62.789083877 podStartE2EDuration="1m2.789083877s" podCreationTimestamp="2025-11-25 15:17:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.78842066 +0000 UTC m=+89.842829172" watchObservedRunningTime="2025-11-25 15:18:48.789083877 +0000 UTC m=+89.843492369"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.791680 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ab7c611-b524-44b1-b8ec-7e14630fdd89-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qdrr6\" (UID: \"2ab7c611-b524-44b1-b8ec-7e14630fdd89\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6"
[... 15:18:48.791727 – 15:18:48.791879: matching "operationExecutor.MountVolume started" entries for the "service-ca", "serving-cert", "etc-cvo-updatepayloads", and "etc-ssl-certs" volumes of the same pod ...]
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.791960 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/2ab7c611-b524-44b1-b8ec-7e14630fdd89-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-qdrr6\" (UID: \"2ab7c611-b524-44b1-b8ec-7e14630fdd89\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6"
[... 15:18:48.793431 – 15:18:48.811001: MountVolume.SetUp succeeded likewise for "service-ca", "etc-cvo-updatepayloads", "serving-cert", and "kube-api-access" ...]
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.833500 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=35.833473837 podStartE2EDuration="35.833473837s" podCreationTimestamp="2025-11-25 15:18:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.818670977 +0000 UTC m=+89.873079479" watchObservedRunningTime="2025-11-25 15:18:48.833473837 +0000 UTC m=+89.887882339"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.854608 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-84zhh" podStartSLOduration=66.854586032 podStartE2EDuration="1m6.854586032s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.85332421 +0000 UTC m=+89.907732712" watchObservedRunningTime="2025-11-25 15:18:48.854586032 +0000 UTC m=+89.908994534"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.872987 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-895nk" podStartSLOduration=66.872966069 podStartE2EDuration="1m6.872966069s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.87261204 +0000 UTC m=+89.927020522" watchObservedRunningTime="2025-11-25 15:18:48.872966069 +0000 UTC m=+89.927374571"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.890923 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=67.890902236 podStartE2EDuration="1m7.890902236s" podCreationTimestamp="2025-11-25 15:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.890813214 +0000 UTC m=+89.945221706" watchObservedRunningTime="2025-11-25 15:18:48.890902236 +0000 UTC m=+89.945310728"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.949159 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-6tshx" podStartSLOduration=68.949139654 podStartE2EDuration="1m8.949139654s" podCreationTimestamp="2025-11-25 15:17:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.94816216 +0000 UTC m=+90.002570662" watchObservedRunningTime="2025-11-25 15:18:48.949139654 +0000 UTC m=+90.003548136"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.960292 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=12.960269615 podStartE2EDuration="12.960269615s" podCreationTimestamp="2025-11-25 15:18:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:48.958673636 +0000 UTC m=+90.013082128" watchObservedRunningTime="2025-11-25 15:18:48.960269615 +0000 UTC m=+90.014678107"
Nov 25 15:18:48 crc kubenswrapper[4800]: I1125 15:18:48.978022 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6"
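The pod_startup_latency_tracker entries above are internally consistent: when no image pull occurred (firstStartedPulling is the zero time "0001-01-01 00:00:00 +0000 UTC"), podStartSLOduration is simply watchObservedRunningTime minus podCreationTimestamp. A tiny self-contained check (an illustration, not kubelet code) against the openshift-etcd/etcd-crc entry:

// Reproduces podStartSLOduration=3.690802333 for openshift-etcd/etcd-crc.
// Both timestamps are copied verbatim from the log entry above.
package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339, "2025-11-25T15:18:45Z")
	observed, _ := time.Parse(time.RFC3339Nano, "2025-11-25T15:18:48.690802333Z")
	fmt.Println(observed.Sub(created).Seconds()) // 3.690802333
}

The same subtraction reproduces the other figures, e.g. 66.744300606 s for node-ca-6qf5g (created 15:17:42, running 15:18:48.744300606) and 67.890902236 s for kube-apiserver-crc (created 15:17:41, running 15:18:48.890902236).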
Nov 25 15:18:49 crc kubenswrapper[4800]: I1125 15:18:49.001616 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podStartSLOduration=67.001595941 podStartE2EDuration="1m7.001595941s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:49.000414142 +0000 UTC m=+90.054822634" watchObservedRunningTime="2025-11-25 15:18:49.001595941 +0000 UTC m=+90.056004433"
Nov 25 15:18:49 crc kubenswrapper[4800]: I1125 15:18:49.150222 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/2.log"
Nov 25 15:18:49 crc kubenswrapper[4800]: I1125 15:18:49.154372 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerStarted","Data":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"}
Nov 25 15:18:49 crc kubenswrapper[4800]: I1125 15:18:49.155051 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw"
Nov 25 15:18:49 crc kubenswrapper[4800]: I1125 15:18:49.155419 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6" event={"ID":"2ab7c611-b524-44b1-b8ec-7e14630fdd89","Type":"ContainerStarted","Data":"78bc2ff833f15c44dc68f55a3d58dd964dac4b361a3bd24efda670c619e025ec"}
Nov 25 15:18:49 crc kubenswrapper[4800]: I1125 15:18:49.182324 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podStartSLOduration=67.182303442 podStartE2EDuration="1m7.182303442s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:49.181442581 +0000 UTC m=+90.235851073" watchObservedRunningTime="2025-11-25 15:18:49.182303442 +0000 UTC m=+90.236711944"
Nov 25 15:18:50 crc kubenswrapper[4800]: I1125 15:18:50.005601 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-fjqzf"]
Nov 25 15:18:50 crc kubenswrapper[4800]: I1125 15:18:50.160770 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6" event={"ID":"2ab7c611-b524-44b1-b8ec-7e14630fdd89","Type":"ContainerStarted","Data":"72da46c4a1cf7fee574ecc2d3338d0174306cc48dfcb50c0db337ace478ef592"}
[... 15:18:49.785 – 15:18:52.786: the "No sandbox for pod can be found" / "Error syncing pod, skipping" pairs recur roughly once per second for openshift-multus/network-metrics-daemon-fjqzf, openshift-network-console/networking-console-plugin-85b44fc459-gdk6g, openshift-network-diagnostics/network-check-target-xd92c, and openshift-network-diagnostics/network-check-source-55646444c4-trplf, with the same podUIDs as above ...]
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:51 crc kubenswrapper[4800]: E1125 15:18:51.785653 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fjqzf" podUID="3e0ee245-1a7f-4428-bbd9-50de79d2cbd8" Nov 25 15:18:52 crc kubenswrapper[4800]: I1125 15:18:52.786008 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:52 crc kubenswrapper[4800]: I1125 15:18:52.786067 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:52 crc kubenswrapper[4800]: E1125 15:18:52.786298 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 15:18:52 crc kubenswrapper[4800]: E1125 15:18:52.786542 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.194138 4800 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.194362 4800 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.231157 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qdrr6" podStartSLOduration=71.231138902 podStartE2EDuration="1m11.231138902s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:51.186181727 +0000 UTC m=+92.240590249" watchObservedRunningTime="2025-11-25 15:18:53.231138902 +0000 UTC m=+94.285547384" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.232041 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mtjz"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.232459 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.236963 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.237335 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.237597 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.237918 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.238396 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.238725 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.240910 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.241075 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.241196 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.241288 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.241213 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.241478 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.250502 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.250554 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.250710 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.250726 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.251068 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.251236 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 
25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.252345 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.252813 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.253418 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.254968 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.255201 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.255317 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.255441 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.256629 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.256756 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.256905 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.257032 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.257159 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.257379 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-gjqqh"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.257562 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.258465 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-q5x2z"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.259382 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-q5x2z" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.259637 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.260523 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.261058 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.261193 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.262040 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.277459 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.277672 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.277741 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.277911 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.277967 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.278448 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.278482 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.278623 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.278885 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.277680 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.280223 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.280418 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.280532 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-sx8kw"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.281496 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.281731 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bm2fk"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.283180 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.283724 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.290660 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.290967 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.291028 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.291657 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.294075 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dkmth"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.295333 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.295805 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.322193 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.322497 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.322743 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.322748 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.322914 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.323025 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.323131 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.323169 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.322943 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.323428 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.323605 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.323612 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r4fbv"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.324087 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.324265 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.324446 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.325056 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.325347 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.325437 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rfhb2"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.325997 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.327547 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.327922 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.328675 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.329031 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.329162 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.329526 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7txz7"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.330205 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.329594 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.330767 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.329964 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.330506 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.331051 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.331180 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.331723 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.333123 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-t52ch"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.333862 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.334329 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.334464 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9rpcj"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.334800 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.335003 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.338048 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.338534 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.338086 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.341688 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.338355 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342369 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cc06d61c-999f-4431-90a4-1fb72e759925-audit-dir\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342394 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/09fb6a8e-92e7-4f23-8d16-6b8616759965-bound-sa-token\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342417 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-config\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342440 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv6wf\" (UniqueName: \"kubernetes.io/projected/0329df6e-d8e5-4b22-be13-f934904b0ae7-kube-api-access-sv6wf\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: 
\"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342447 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342460 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0329df6e-d8e5-4b22-be13-f934904b0ae7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342480 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-serving-cert\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342496 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smc6g\" (UniqueName: \"kubernetes.io/projected/b228d866-5740-4a89-82b3-53c6272a70cc-kube-api-access-smc6g\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342517 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtlw2\" (UniqueName: \"kubernetes.io/projected/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-kube-api-access-qtlw2\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342545 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-serving-cert\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342568 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pxjv\" (UniqueName: \"kubernetes.io/projected/81f88e63-467c-4356-bb2b-b5aa9d93f512-kube-api-access-9pxjv\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342586 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342601 4800 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb5nn\" (UniqueName: \"kubernetes.io/projected/09fb6a8e-92e7-4f23-8d16-6b8616759965-kube-api-access-zb5nn\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342617 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-config\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342631 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-config\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342646 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342664 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342680 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342701 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b2446cfe-e233-473a-a837-40fe268aa5dc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342718 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/cc06d61c-999f-4431-90a4-1fb72e759925-node-pullsecrets\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342746 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-encryption-config\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342764 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49v92\" (UniqueName: \"kubernetes.io/projected/b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb-kube-api-access-49v92\") pod \"dns-operator-744455d44c-rfhb2\" (UID: \"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb\") " pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342786 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-trusted-ca-bundle\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342806 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98rkq\" (UniqueName: \"kubernetes.io/projected/35aecb5c-7326-4273-9cd4-0820f1ee32b4-kube-api-access-98rkq\") pod \"cluster-samples-operator-665b6dd947-cr942\" (UID: \"35aecb5c-7326-4273-9cd4-0820f1ee32b4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342824 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342846 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342880 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b2446cfe-e233-473a-a837-40fe268aa5dc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342900 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9d93348-48bd-40a5-a930-11745b4ba869-serving-cert\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342903 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342933 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-config\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342953 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4c143db6-2d6b-49bd-987b-a3fbacb8a562-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342971 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh2dl\" (UniqueName: \"kubernetes.io/projected/e9d93348-48bd-40a5-a930-11745b4ba869-kube-api-access-fh2dl\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342995 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343017 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343039 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343055 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6160b-061b-4d7b-beac-5873f6c0192c-audit-dir\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343070 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9d93348-48bd-40a5-a930-11745b4ba869-config\") pod \"console-operator-58897d9998-bm2fk\" (UID: 
\"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343092 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-oauth-config\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343112 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqnc5\" (UniqueName: \"kubernetes.io/projected/4c143db6-2d6b-49bd-987b-a3fbacb8a562-kube-api-access-zqnc5\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343121 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343129 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9d93348-48bd-40a5-a930-11745b4ba869-trusted-ca\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343150 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/35aecb5c-7326-4273-9cd4-0820f1ee32b4-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-cr942\" (UID: \"35aecb5c-7326-4273-9cd4-0820f1ee32b4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343169 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w9sd\" (UniqueName: \"kubernetes.io/projected/cc06d61c-999f-4431-90a4-1fb72e759925-kube-api-access-6w9sd\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343188 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/09fb6a8e-92e7-4f23-8d16-6b8616759965-metrics-tls\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343205 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-machine-approver-tls\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343223 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343252 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b2446cfe-e233-473a-a837-40fe268aa5dc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343272 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343290 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343307 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-etcd-client\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343328 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-dir\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343346 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-etcd-serving-ca\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343361 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-encryption-config\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343377 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-pmrhx\" (UniqueName: \"kubernetes.io/projected/6f015c93-38f5-4f11-9f72-6d99259e4058-kube-api-access-pmrhx\") pod \"downloads-7954f5f757-q5x2z\" (UID: \"6f015c93-38f5-4f11-9f72-6d99259e4058\") " pod="openshift-console/downloads-7954f5f757-q5x2z" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343394 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-auth-proxy-config\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343441 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb-metrics-tls\") pod \"dns-operator-744455d44c-rfhb2\" (UID: \"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb\") " pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343465 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c143db6-2d6b-49bd-987b-a3fbacb8a562-serving-cert\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343484 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2swm\" (UniqueName: \"kubernetes.io/projected/a3f6160b-061b-4d7b-beac-5873f6c0192c-kube-api-access-d2swm\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343510 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-audit-policies\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343587 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-serving-cert\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343775 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-oauth-serving-cert\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343825 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-etcd-client\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343868 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343889 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0329df6e-d8e5-4b22-be13-f934904b0ae7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343907 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.338401 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343998 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.343940 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.338460 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.342789 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.344194 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.344205 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.344312 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.354122 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 
15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.354365 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.344319 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.357162 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.358375 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.362939 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.362989 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.344435 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/09fb6a8e-92e7-4f23-8d16-6b8616759965-trusted-ca\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363395 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-policies\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363430 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-client-ca\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363487 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prh2v\" (UniqueName: \"kubernetes.io/projected/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-kube-api-access-prh2v\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363515 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-service-ca\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363548 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-serving-cert\") pod \"apiserver-7bbb656c7d-j4lh6\" 
(UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363572 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9tqq\" (UniqueName: \"kubernetes.io/projected/56f1dabd-4d11-4dc4-9961-efac4124e4a5-kube-api-access-j9tqq\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363594 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b228d866-5740-4a89-82b3-53c6272a70cc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363615 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z45m\" (UniqueName: \"kubernetes.io/projected/b2446cfe-e233-473a-a837-40fe268aa5dc-kube-api-access-2z45m\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363643 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-service-ca-bundle\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363666 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-config\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363689 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-audit\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363708 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-image-import-ca\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363727 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-serving-cert\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " 
pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363807 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jwlr\" (UniqueName: \"kubernetes.io/projected/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-kube-api-access-7jwlr\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.363844 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b228d866-5740-4a89-82b3-53c6272a70cc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.367644 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.371218 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lmlfk"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.372511 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-27frf"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.371748 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.376683 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.378507 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.379140 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.379432 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.379930 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.380054 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.380167 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.381192 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.381280 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.381465 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.381482 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.381565 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.381687 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.381795 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.393914 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.394086 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.394287 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.394389 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.394537 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 
25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.394665 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.394754 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.399827 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.401410 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.401745 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.402086 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.402432 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-92c5c"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.402814 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.403111 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.403358 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.403369 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.403461 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.403873 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.404492 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.406057 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.406370 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.406888 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.407205 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.407809 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.413671 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.416049 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.423816 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.426958 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.429446 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.430159 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.430270 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.437003 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.438018 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.438094 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8stkc"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.439139 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.441499 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5h48t"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.442895 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.442977 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.450471 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.451737 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mtjz"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.451895 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.453153 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.455666 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.458210 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-57652"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.460017 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.462031 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xmp86"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.465607 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh2dl\" (UniqueName: \"kubernetes.io/projected/e9d93348-48bd-40a5-a930-11745b4ba869-kube-api-access-fh2dl\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.465703 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-config\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.465747 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4c143db6-2d6b-49bd-987b-a3fbacb8a562-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.465785 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.465820 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.465872 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466008 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6160b-061b-4d7b-beac-5873f6c0192c-audit-dir\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466075 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9d93348-48bd-40a5-a930-11745b4ba869-config\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466117 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqnc5\" (UniqueName: \"kubernetes.io/projected/4c143db6-2d6b-49bd-987b-a3fbacb8a562-kube-api-access-zqnc5\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466143 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9d93348-48bd-40a5-a930-11745b4ba869-trusted-ca\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466205 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-oauth-config\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466230 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/09fb6a8e-92e7-4f23-8d16-6b8616759965-metrics-tls\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466259 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-machine-approver-tls\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466281 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466326 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" 
(UniqueName: \"kubernetes.io/secret/35aecb5c-7326-4273-9cd4-0820f1ee32b4-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-cr942\" (UID: \"35aecb5c-7326-4273-9cd4-0820f1ee32b4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466352 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w9sd\" (UniqueName: \"kubernetes.io/projected/cc06d61c-999f-4431-90a4-1fb72e759925-kube-api-access-6w9sd\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466382 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b2446cfe-e233-473a-a837-40fe268aa5dc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466407 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466431 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466458 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-dir\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466481 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-etcd-client\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466509 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-encryption-config\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.466532 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmrhx\" (UniqueName: \"kubernetes.io/projected/6f015c93-38f5-4f11-9f72-6d99259e4058-kube-api-access-pmrhx\") pod \"downloads-7954f5f757-q5x2z\" (UID: 
\"6f015c93-38f5-4f11-9f72-6d99259e4058\") " pod="openshift-console/downloads-7954f5f757-q5x2z" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.467502 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.468037 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469212 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-config\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469245 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-auth-proxy-config\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469349 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-etcd-serving-ca\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469380 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb-metrics-tls\") pod \"dns-operator-744455d44c-rfhb2\" (UID: \"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb\") " pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469419 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c143db6-2d6b-49bd-987b-a3fbacb8a562-serving-cert\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469463 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4c143db6-2d6b-49bd-987b-a3fbacb8a562-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469475 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-audit-policies\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469592 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2swm\" (UniqueName: 
\"kubernetes.io/projected/a3f6160b-061b-4d7b-beac-5873f6c0192c-kube-api-access-d2swm\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469636 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-oauth-serving-cert\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469669 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-serving-cert\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469705 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-etcd-client\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469775 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469798 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469828 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0329df6e-d8e5-4b22-be13-f934904b0ae7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469878 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469907 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-client-ca\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" 
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469961 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/09fb6a8e-92e7-4f23-8d16-6b8616759965-trusted-ca\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.469988 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-policies\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470044 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prh2v\" (UniqueName: \"kubernetes.io/projected/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-kube-api-access-prh2v\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470069 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-service-ca\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470092 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-serving-cert\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470137 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b228d866-5740-4a89-82b3-53c6272a70cc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470160 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9tqq\" (UniqueName: \"kubernetes.io/projected/56f1dabd-4d11-4dc4-9961-efac4124e4a5-kube-api-access-j9tqq\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470183 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-service-ca-bundle\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470205 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-config\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470246 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z45m\" (UniqueName: \"kubernetes.io/projected/b2446cfe-e233-473a-a837-40fe268aa5dc-kube-api-access-2z45m\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470270 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-audit\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470299 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-image-import-ca\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470468 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-serving-cert\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470502 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jwlr\" (UniqueName: \"kubernetes.io/projected/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-kube-api-access-7jwlr\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470548 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b228d866-5740-4a89-82b3-53c6272a70cc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470591 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470630 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-config\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470656 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv6wf\" (UniqueName: \"kubernetes.io/projected/0329df6e-d8e5-4b22-be13-f934904b0ae7-kube-api-access-sv6wf\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470696 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cc06d61c-999f-4431-90a4-1fb72e759925-audit-dir\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470716 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/09fb6a8e-92e7-4f23-8d16-6b8616759965-bound-sa-token\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470740 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smc6g\" (UniqueName: \"kubernetes.io/projected/b228d866-5740-4a89-82b3-53c6272a70cc-kube-api-access-smc6g\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470794 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0329df6e-d8e5-4b22-be13-f934904b0ae7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470817 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-serving-cert\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470867 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtlw2\" (UniqueName: \"kubernetes.io/projected/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-kube-api-access-qtlw2\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470895 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-serving-cert\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470957 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb5nn\" (UniqueName: \"kubernetes.io/projected/09fb6a8e-92e7-4f23-8d16-6b8616759965-kube-api-access-zb5nn\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.470982 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-config\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471025 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pxjv\" (UniqueName: \"kubernetes.io/projected/81f88e63-467c-4356-bb2b-b5aa9d93f512-kube-api-access-9pxjv\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471052 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471075 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471138 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471156 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b2446cfe-e233-473a-a837-40fe268aa5dc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471204 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/cc06d61c-999f-4431-90a4-1fb72e759925-node-pullsecrets\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471227 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-config\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471269 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471296 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-trusted-ca-bundle\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471314 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-encryption-config\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471359 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49v92\" (UniqueName: \"kubernetes.io/projected/b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb-kube-api-access-49v92\") pod \"dns-operator-744455d44c-rfhb2\" (UID: \"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb\") " pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471381 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471425 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b2446cfe-e233-473a-a837-40fe268aa5dc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471445 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9d93348-48bd-40a5-a930-11745b4ba869-serving-cert\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471504 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98rkq\" (UniqueName: \"kubernetes.io/projected/35aecb5c-7326-4273-9cd4-0820f1ee32b4-kube-api-access-98rkq\") pod \"cluster-samples-operator-665b6dd947-cr942\" (UID: \"35aecb5c-7326-4273-9cd4-0820f1ee32b4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.471529 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.472215 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9d93348-48bd-40a5-a930-11745b4ba869-trusted-ca\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.472373 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd"]
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.472997 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.473103 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6160b-061b-4d7b-beac-5873f6c0192c-audit-dir\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.473196 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-auth-proxy-config\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.473461 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.473974 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9d93348-48bd-40a5-a930-11745b4ba869-config\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.473998 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-etcd-serving-ca\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.474915 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-audit-policies\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.476031 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4"]
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.476074 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz"]
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.477309 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-oauth-serving-cert\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.478508 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.480516 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-machine-approver-tls\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.480498 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-oauth-config\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.480539 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b2446cfe-e233-473a-a837-40fe268aa5dc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.481071 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.481616 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-serving-cert\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.481808 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.483102 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-swkh6"]
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.483975 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-dir\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.485895 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/09fb6a8e-92e7-4f23-8d16-6b8616759965-metrics-tls\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf"
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.486422 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24"]
Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.486544 4800 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.487662 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-gjqqh"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.487678 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.487847 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-config\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.488055 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-service-ca-bundle\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.488189 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.488958 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-service-ca\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.489535 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/35aecb5c-7326-4273-9cd4-0820f1ee32b4-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-cr942\" (UID: \"35aecb5c-7326-4273-9cd4-0820f1ee32b4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.490075 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.490514 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-policies\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.490766 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.491017 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cc06d61c-999f-4431-90a4-1fb72e759925-audit-dir\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.491087 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.491331 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/cc06d61c-999f-4431-90a4-1fb72e759925-node-pullsecrets\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.491713 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-config\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.492312 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-config\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.492594 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-config\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.493319 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-audit\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.493667 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-etcd-client\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.494789 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/4c143db6-2d6b-49bd-987b-a3fbacb8a562-serving-cert\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.494984 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b228d866-5740-4a89-82b3-53c6272a70cc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.495145 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-serving-cert\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.495173 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/cc06d61c-999f-4431-90a4-1fb72e759925-image-import-ca\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.495401 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9rpcj"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.495433 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.495447 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7txz7"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.495636 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0329df6e-d8e5-4b22-be13-f934904b0ae7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.495834 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.496110 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.496706 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-encryption-config\") pod 
\"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.497423 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.498454 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc06d61c-999f-4431-90a4-1fb72e759925-serving-cert\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.498662 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-trusted-ca-bundle\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.498778 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.498938 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-serving-cert\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.499384 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.500096 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/09fb6a8e-92e7-4f23-8d16-6b8616759965-trusted-ca\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.500138 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.500164 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9d93348-48bd-40a5-a930-11745b4ba869-serving-cert\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.500164 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a3f6160b-061b-4d7b-beac-5873f6c0192c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.500828 4800 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bm2fk"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.501638 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b228d866-5740-4a89-82b3-53c6272a70cc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.502700 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b2446cfe-e233-473a-a837-40fe268aa5dc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.503128 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r4fbv"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.503177 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-encryption-config\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.503342 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb-metrics-tls\") pod \"dns-operator-744455d44c-rfhb2\" (UID: \"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb\") " pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.503391 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-etcd-client\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.504016 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3f6160b-061b-4d7b-beac-5873f6c0192c-serving-cert\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.504125 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.505166 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.505546 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-client-ca\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.506175 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.506650 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.506919 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dkmth"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.508105 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lmlfk"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.509285 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.510231 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.511243 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.512267 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8stkc"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.513466 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.513614 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0329df6e-d8e5-4b22-be13-f934904b0ae7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.515385 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.516577 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.518146 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-q5x2z"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.520673 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-27frf"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.522467 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-sx8kw"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.524033 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.525128 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rfhb2"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.526136 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.527208 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5h48t"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.528276 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-gn26w"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.529354 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.529479 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.530574 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-57652"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.531619 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-92c5c"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.532830 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xmp86"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.534038 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.535256 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.535451 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.536589 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-gn26w"] Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.562050 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.575051 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.615285 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.635149 4800 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.655867 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.675696 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.695179 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.716082 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.736095 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.755825 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.774772 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.784720 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.784761 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.795133 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.816055 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.835705 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.855506 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.875942 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.895311 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.917278 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.940472 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.956711 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.976264 4800 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-route-controller-manager"/"serving-cert" Nov 25 15:18:53 crc kubenswrapper[4800]: I1125 15:18:53.996272 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.015790 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.036511 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.055688 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.075444 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.096234 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.114545 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.135352 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.155459 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.175084 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.195224 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.215525 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.236346 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.257289 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.276553 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.296432 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.316534 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 
15:18:54.335353 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.356022 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.376670 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.402083 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.413605 4800 request.go:700] Waited for 1.009679676s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-controller-dockercfg-c2lfx&limit=500&resourceVersion=0 Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.415971 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.435477 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.456222 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.475720 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.496372 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.515676 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.536265 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.556301 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.576179 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.596501 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.616199 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.636669 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.656474 4800 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.675343 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.695259 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.715319 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.735438 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.756183 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.775226 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.784742 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.784820 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.799724 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.815567 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.836207 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.859385 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.875971 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.895063 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.915428 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.935264 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.961966 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.976486 4800 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 15:18:54 crc kubenswrapper[4800]: I1125 15:18:54.997195 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.016169 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.037139 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.057297 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.076178 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.096885 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.116170 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.153623 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmrhx\" (UniqueName: \"kubernetes.io/projected/6f015c93-38f5-4f11-9f72-6d99259e4058-kube-api-access-pmrhx\") pod \"downloads-7954f5f757-q5x2z\" (UID: \"6f015c93-38f5-4f11-9f72-6d99259e4058\") " pod="openshift-console/downloads-7954f5f757-q5x2z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.175678 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.176594 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w9sd\" (UniqueName: \"kubernetes.io/projected/cc06d61c-999f-4431-90a4-1fb72e759925-kube-api-access-6w9sd\") pod \"apiserver-76f77b778f-dkmth\" (UID: \"cc06d61c-999f-4431-90a4-1fb72e759925\") " pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.195258 4800 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.208678 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-q5x2z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.214272 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.272820 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh2dl\" (UniqueName: \"kubernetes.io/projected/e9d93348-48bd-40a5-a930-11745b4ba869-kube-api-access-fh2dl\") pod \"console-operator-58897d9998-bm2fk\" (UID: \"e9d93348-48bd-40a5-a930-11745b4ba869\") " pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.279844 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqnc5\" (UniqueName: \"kubernetes.io/projected/4c143db6-2d6b-49bd-987b-a3fbacb8a562-kube-api-access-zqnc5\") pod \"openshift-config-operator-7777fb866f-nqpgj\" (UID: \"4c143db6-2d6b-49bd-987b-a3fbacb8a562\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.298179 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2swm\" (UniqueName: \"kubernetes.io/projected/a3f6160b-061b-4d7b-beac-5873f6c0192c-kube-api-access-d2swm\") pod \"apiserver-7bbb656c7d-j4lh6\" (UID: \"a3f6160b-061b-4d7b-beac-5873f6c0192c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.316883 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/09fb6a8e-92e7-4f23-8d16-6b8616759965-bound-sa-token\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.318273 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.327600 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.328913 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.336609 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.356662 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.397472 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b2446cfe-e233-473a-a837-40fe268aa5dc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.407823 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prh2v\" (UniqueName: \"kubernetes.io/projected/21056d5d-5bd1-4ab2-9f9c-8c6cb6212391-kube-api-access-prh2v\") pod \"machine-approver-56656f9798-hc9bz\" (UID: \"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.413803 4800 request.go:700] Waited for 1.923120627s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/serviceaccounts/cluster-image-registry-operator/token Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.447763 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2z45m\" (UniqueName: \"kubernetes.io/projected/b2446cfe-e233-473a-a837-40fe268aa5dc-kube-api-access-2z45m\") pod \"cluster-image-registry-operator-dc59b4c8b-bfj8f\" (UID: \"b2446cfe-e233-473a-a837-40fe268aa5dc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.460822 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.462537 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-q5x2z"] Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.464443 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sv6wf\" (UniqueName: \"kubernetes.io/projected/0329df6e-d8e5-4b22-be13-f934904b0ae7-kube-api-access-sv6wf\") pod \"openshift-apiserver-operator-796bbdcf4f-7njxz\" (UID: \"0329df6e-d8e5-4b22-be13-f934904b0ae7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:55 crc kubenswrapper[4800]: W1125 15:18:55.477392 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f015c93_38f5_4f11_9f72_6d99259e4058.slice/crio-9d960793b2374b6cc2c53659e4cd67e87ff9a062d36c5e7a2ed978dfe6f1b54e WatchSource:0}: Error finding container 9d960793b2374b6cc2c53659e4cd67e87ff9a062d36c5e7a2ed978dfe6f1b54e: Status 404 returned error can't find the container with id 9d960793b2374b6cc2c53659e4cd67e87ff9a062d36c5e7a2ed978dfe6f1b54e Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.477491 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smc6g\" (UniqueName: \"kubernetes.io/projected/b228d866-5740-4a89-82b3-53c6272a70cc-kube-api-access-smc6g\") pod \"openshift-controller-manager-operator-756b6f6bc6-6prpd\" (UID: \"b228d866-5740-4a89-82b3-53c6272a70cc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.494969 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.498488 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49v92\" (UniqueName: \"kubernetes.io/projected/b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb-kube-api-access-49v92\") pod \"dns-operator-744455d44c-rfhb2\" (UID: \"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb\") " pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.515046 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtlw2\" (UniqueName: \"kubernetes.io/projected/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-kube-api-access-qtlw2\") pod \"controller-manager-879f6c89f-5mtjz\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.533150 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9tqq\" (UniqueName: \"kubernetes.io/projected/56f1dabd-4d11-4dc4-9961-efac4124e4a5-kube-api-access-j9tqq\") pod \"oauth-openshift-558db77b4-r4fbv\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.542143 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.556254 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jwlr\" (UniqueName: \"kubernetes.io/projected/0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42-kube-api-access-7jwlr\") pod \"authentication-operator-69f744f599-mbjjh\" (UID: \"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.575504 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dkmth"] Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.584623 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pxjv\" (UniqueName: \"kubernetes.io/projected/81f88e63-467c-4356-bb2b-b5aa9d93f512-kube-api-access-9pxjv\") pod \"console-f9d7485db-sx8kw\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:55 crc kubenswrapper[4800]: W1125 15:18:55.593548 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc06d61c_999f_4431_90a4_1fb72e759925.slice/crio-c1b97e845621cc32459a598f3fd39cf672f0fe9605bdf38922ec39f71526c764 WatchSource:0}: Error finding container c1b97e845621cc32459a598f3fd39cf672f0fe9605bdf38922ec39f71526c764: Status 404 returned error can't find the container with id c1b97e845621cc32459a598f3fd39cf672f0fe9605bdf38922ec39f71526c764 Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.594215 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb5nn\" (UniqueName: \"kubernetes.io/projected/09fb6a8e-92e7-4f23-8d16-6b8616759965-kube-api-access-zb5nn\") pod \"ingress-operator-5b745b69d9-r7cmf\" (UID: \"09fb6a8e-92e7-4f23-8d16-6b8616759965\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.614303 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98rkq\" (UniqueName: \"kubernetes.io/projected/35aecb5c-7326-4273-9cd4-0820f1ee32b4-kube-api-access-98rkq\") pod \"cluster-samples-operator-665b6dd947-cr942\" (UID: \"35aecb5c-7326-4273-9cd4-0820f1ee32b4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.640443 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.641323 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.646498 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.656058 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.665999 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.673344 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.676067 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.683786 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.687280 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.694671 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.707782 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6bcz\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-kube-api-access-x6bcz\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.707869 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a451496e-aec1-4381-916e-d9875d29dbd2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.707926 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a451496e-aec1-4381-916e-d9875d29dbd2-config\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.707979 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708028 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-certificates\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708052 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-trusted-ca\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708078 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf691fcb-4403-45a8-80e0-58a2c50f5481-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708110 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-tls\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708142 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a451496e-aec1-4381-916e-d9875d29dbd2-images\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708168 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-bound-sa-token\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708213 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m6hn\" (UniqueName: \"kubernetes.io/projected/a451496e-aec1-4381-916e-d9875d29dbd2-kube-api-access-5m6hn\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.708242 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf691fcb-4403-45a8-80e0-58a2c50f5481-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: E1125 15:18:55.708759 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.208738393 +0000 UTC m=+97.263146875 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.714398 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.731219 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.735541 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.756468 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.765082 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj"] Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.776743 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.797230 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.810732 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811076 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/05140453-39c5-4248-8398-226470d13069-certs\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811105 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/975d90c1-ba29-4bab-9d10-b971ed9a744f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811139 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-config\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811158 4800 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbebb283-2819-40be-8e65-feefc29bc4a1-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-w2lpp\" (UID: \"fbebb283-2819-40be-8e65-feefc29bc4a1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811199 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bckkm\" (UniqueName: \"kubernetes.io/projected/b26fc63a-2ff5-4326-b726-52c072bed8a9-kube-api-access-bckkm\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811219 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9bdt\" (UniqueName: \"kubernetes.io/projected/726696e4-25c2-4664-b022-613823361a4f-kube-api-access-z9bdt\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811241 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn49g\" (UniqueName: \"kubernetes.io/projected/f0affc43-5c6d-423a-85d1-73454b3a197b-kube-api-access-dn49g\") pod \"migrator-59844c95c7-ftqrc\" (UID: \"f0affc43-5c6d-423a-85d1-73454b3a197b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811266 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-client\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811284 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9d5157df-dc6c-4f18-81a8-96dc67ec7476-signing-cabundle\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811302 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf691fcb-4403-45a8-80e0-58a2c50f5481-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811317 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/975d90c1-ba29-4bab-9d10-b971ed9a744f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811332 4800 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44gqq\" (UniqueName: \"kubernetes.io/projected/c5366c0c-3c63-4fef-bfa4-7409c182f913-kube-api-access-44gqq\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811367 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-plugins-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811406 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811441 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a451496e-aec1-4381-916e-d9875d29dbd2-images\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811469 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-bound-sa-token\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811521 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7b17a448-d367-4dc3-87ff-0acabb92266e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lmlfk\" (UID: \"7b17a448-d367-4dc3-87ff-0acabb92266e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811546 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f57363f6-58b9-4a98-893d-9ba2060b31c4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811564 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnjcw\" (UniqueName: \"kubernetes.io/projected/dd18f589-9ad4-4626-962c-11632f7750ec-kube-api-access-lnjcw\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7wcv\" (UID: \"dd18f589-9ad4-4626-962c-11632f7750ec\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811581 4800 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811602 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c5366c0c-3c63-4fef-bfa4-7409c182f913-proxy-tls\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811618 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2w75\" (UniqueName: \"kubernetes.io/projected/0ca6158f-3cfc-484b-946a-311538680135-kube-api-access-j2w75\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811637 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-config-volume\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811656 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prsfg\" (UniqueName: \"kubernetes.io/projected/7b17a448-d367-4dc3-87ff-0acabb92266e-kube-api-access-prsfg\") pod \"multus-admission-controller-857f4d67dd-lmlfk\" (UID: \"7b17a448-d367-4dc3-87ff-0acabb92266e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811681 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/975d90c1-ba29-4bab-9d10-b971ed9a744f-config\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811696 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-csi-data-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811725 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d5c4f943-ed42-40c5-9735-15b2935c7db0-proxy-tls\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811743 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-config\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811759 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9d5157df-dc6c-4f18-81a8-96dc67ec7476-signing-key\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811776 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-metrics-certs\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811794 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-config-volume\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811839 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-client-ca\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811882 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/726696e4-25c2-4664-b022-613823361a4f-webhook-cert\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811924 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6bcz\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-kube-api-access-x6bcz\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811938 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-ca\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811956 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqxl2\" (UniqueName: 
\"kubernetes.io/projected/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-kube-api-access-xqxl2\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.811972 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7ssx\" (UniqueName: \"kubernetes.io/projected/f57363f6-58b9-4a98-893d-9ba2060b31c4-kube-api-access-m7ssx\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812063 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-mountpoint-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812095 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-service-ca\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812114 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jmn8\" (UniqueName: \"kubernetes.io/projected/d5c4f943-ed42-40c5-9735-15b2935c7db0-kube-api-access-7jmn8\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812129 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xnz5\" (UniqueName: \"kubernetes.io/projected/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-kube-api-access-7xnz5\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812185 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a451496e-aec1-4381-916e-d9875d29dbd2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812227 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66ef1858-faae-4481-aab0-044995d502fc-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812340 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9bln\" (UniqueName: 
\"kubernetes.io/projected/be8b1bce-ac58-4819-a840-8ad7652edc9d-kube-api-access-r9bln\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812360 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66ef1858-faae-4481-aab0-044995d502fc-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812376 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/05140453-39c5-4248-8398-226470d13069-node-bootstrap-token\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812395 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-socket-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812431 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d5c4f943-ed42-40c5-9735-15b2935c7db0-auth-proxy-config\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812447 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-secret-volume\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812475 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-certificates\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812499 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-trusted-ca\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812550 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrsff\" (UniqueName: 
\"kubernetes.io/projected/05140453-39c5-4248-8398-226470d13069-kube-api-access-zrsff\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc kubenswrapper[4800]: E1125 15:18:55.812596 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.312568221 +0000 UTC m=+97.366976693 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812651 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txbqm\" (UniqueName: \"kubernetes.io/projected/9d5157df-dc6c-4f18-81a8-96dc67ec7476-kube-api-access-txbqm\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812701 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm88p\" (UniqueName: \"kubernetes.io/projected/fbebb283-2819-40be-8e65-feefc29bc4a1-kube-api-access-pm88p\") pod \"package-server-manager-789f6589d5-w2lpp\" (UID: \"fbebb283-2819-40be-8e65-feefc29bc4a1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812730 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/acbe8399-e94e-44d9-b2be-360e1f8231ec-srv-cert\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812756 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/acbe8399-e94e-44d9-b2be-360e1f8231ec-profile-collector-cert\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812803 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be8b1bce-ac58-4819-a840-8ad7652edc9d-config\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812836 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-tls\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812875 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-stats-auth\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812909 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd5fx\" (UniqueName: \"kubernetes.io/projected/66ef1858-faae-4481-aab0-044995d502fc-kube-api-access-pd5fx\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812932 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c5366c0c-3c63-4fef-bfa4-7409c182f913-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812969 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d5c4f943-ed42-40c5-9735-15b2935c7db0-images\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.812986 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65ffe8ee-bf0b-4598-a13b-f396179a9ef9-cert\") pod \"ingress-canary-57652\" (UID: \"65ffe8ee-bf0b-4598-a13b-f396179a9ef9\") " pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.813017 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgjsn\" (UniqueName: \"kubernetes.io/projected/acbe8399-e94e-44d9-b2be-360e1f8231ec-kube-api-access-dgjsn\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.813076 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-default-certificate\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.813106 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h8p7\" (UniqueName: 
\"kubernetes.io/projected/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-kube-api-access-8h8p7\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.813183 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m6hn\" (UniqueName: \"kubernetes.io/projected/a451496e-aec1-4381-916e-d9875d29dbd2-kube-api-access-5m6hn\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.813230 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.813248 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-serving-cert\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814002 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-metrics-tls\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814068 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf691fcb-4403-45a8-80e0-58a2c50f5481-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814122 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kx7j\" (UniqueName: \"kubernetes.io/projected/65ffe8ee-bf0b-4598-a13b-f396179a9ef9-kube-api-access-7kx7j\") pod \"ingress-canary-57652\" (UID: \"65ffe8ee-bf0b-4598-a13b-f396179a9ef9\") " pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814152 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9b17177-8d45-46be-84cd-13a0613df952-service-ca-bundle\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814240 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae0938fa-72d0-4235-8423-6a187f5d854b-config\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814283 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814330 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xh56\" (UniqueName: \"kubernetes.io/projected/c50787f1-b3aa-49be-adc2-610beeeede6d-kube-api-access-7xh56\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814397 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b26fc63a-2ff5-4326-b726-52c072bed8a9-serving-cert\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814424 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f57363f6-58b9-4a98-893d-9ba2060b31c4-srv-cert\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814453 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/dd18f589-9ad4-4626-962c-11632f7750ec-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7wcv\" (UID: \"dd18f589-9ad4-4626-962c-11632f7750ec\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814506 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmsb7\" (UniqueName: \"kubernetes.io/projected/f9b17177-8d45-46be-84cd-13a0613df952-kube-api-access-nmsb7\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814574 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-registration-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814594 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ae0938fa-72d0-4235-8423-6a187f5d854b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814613 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ae0938fa-72d0-4235-8423-6a187f5d854b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814631 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-config\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814697 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be8b1bce-ac58-4819-a840-8ad7652edc9d-serving-cert\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814732 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/726696e4-25c2-4664-b022-613823361a4f-apiservice-cert\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814795 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a451496e-aec1-4381-916e-d9875d29dbd2-config\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.814832 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/726696e4-25c2-4664-b022-613823361a4f-tmpfs\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.815267 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.817348 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-certificates\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 
15:18:55.818097 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-trusted-ca\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.813257 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bm2fk"] Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.819576 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a451496e-aec1-4381-916e-d9875d29dbd2-config\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.819592 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf691fcb-4403-45a8-80e0-58a2c50f5481-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.821651 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a451496e-aec1-4381-916e-d9875d29dbd2-images\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.823082 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf691fcb-4403-45a8-80e0-58a2c50f5481-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.826047 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-tls\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.827726 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a451496e-aec1-4381-916e-d9875d29dbd2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.848484 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.856191 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.872675 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m6hn\" (UniqueName: \"kubernetes.io/projected/a451496e-aec1-4381-916e-d9875d29dbd2-kube-api-access-5m6hn\") pod \"machine-api-operator-5694c8668f-gjqqh\" (UID: \"a451496e-aec1-4381-916e-d9875d29dbd2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.895008 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-bound-sa-token\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921034 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d5c4f943-ed42-40c5-9735-15b2935c7db0-auth-proxy-config\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921101 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-secret-volume\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921126 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm88p\" (UniqueName: \"kubernetes.io/projected/fbebb283-2819-40be-8e65-feefc29bc4a1-kube-api-access-pm88p\") pod \"package-server-manager-789f6589d5-w2lpp\" (UID: \"fbebb283-2819-40be-8e65-feefc29bc4a1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921163 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/acbe8399-e94e-44d9-b2be-360e1f8231ec-srv-cert\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921182 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/acbe8399-e94e-44d9-b2be-360e1f8231ec-profile-collector-cert\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921208 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrsff\" (UniqueName: \"kubernetes.io/projected/05140453-39c5-4248-8398-226470d13069-kube-api-access-zrsff\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc 
kubenswrapper[4800]: I1125 15:18:55.921245 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txbqm\" (UniqueName: \"kubernetes.io/projected/9d5157df-dc6c-4f18-81a8-96dc67ec7476-kube-api-access-txbqm\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921267 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be8b1bce-ac58-4819-a840-8ad7652edc9d-config\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921285 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-stats-auth\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921410 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd5fx\" (UniqueName: \"kubernetes.io/projected/66ef1858-faae-4481-aab0-044995d502fc-kube-api-access-pd5fx\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921434 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c5366c0c-3c63-4fef-bfa4-7409c182f913-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921453 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65ffe8ee-bf0b-4598-a13b-f396179a9ef9-cert\") pod \"ingress-canary-57652\" (UID: \"65ffe8ee-bf0b-4598-a13b-f396179a9ef9\") " pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921500 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgjsn\" (UniqueName: \"kubernetes.io/projected/acbe8399-e94e-44d9-b2be-360e1f8231ec-kube-api-access-dgjsn\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921519 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d5c4f943-ed42-40c5-9735-15b2935c7db0-images\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921542 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-default-certificate\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922196 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h8p7\" (UniqueName: \"kubernetes.io/projected/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-kube-api-access-8h8p7\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922227 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922270 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-serving-cert\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922292 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-metrics-tls\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922312 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kx7j\" (UniqueName: \"kubernetes.io/projected/65ffe8ee-bf0b-4598-a13b-f396179a9ef9-kube-api-access-7kx7j\") pod \"ingress-canary-57652\" (UID: \"65ffe8ee-bf0b-4598-a13b-f396179a9ef9\") " pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922351 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9b17177-8d45-46be-84cd-13a0613df952-service-ca-bundle\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922372 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae0938fa-72d0-4235-8423-6a187f5d854b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922384 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be8b1bce-ac58-4819-a840-8ad7652edc9d-config\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922395 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922477 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xh56\" (UniqueName: \"kubernetes.io/projected/c50787f1-b3aa-49be-adc2-610beeeede6d-kube-api-access-7xh56\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922507 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/dd18f589-9ad4-4626-962c-11632f7750ec-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7wcv\" (UID: \"dd18f589-9ad4-4626-962c-11632f7750ec\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922533 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b26fc63a-2ff5-4326-b726-52c072bed8a9-serving-cert\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922551 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f57363f6-58b9-4a98-893d-9ba2060b31c4-srv-cert\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922578 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmsb7\" (UniqueName: \"kubernetes.io/projected/f9b17177-8d45-46be-84cd-13a0613df952-kube-api-access-nmsb7\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922607 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae0938fa-72d0-4235-8423-6a187f5d854b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922625 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ae0938fa-72d0-4235-8423-6a187f5d854b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922643 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-config\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922664 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-registration-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922682 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/726696e4-25c2-4664-b022-613823361a4f-apiservice-cert\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922715 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be8b1bce-ac58-4819-a840-8ad7652edc9d-serving-cert\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922743 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/726696e4-25c2-4664-b022-613823361a4f-tmpfs\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922765 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/05140453-39c5-4248-8398-226470d13069-certs\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922782 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/975d90c1-ba29-4bab-9d10-b971ed9a744f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922805 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbebb283-2819-40be-8e65-feefc29bc4a1-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-w2lpp\" (UID: \"fbebb283-2819-40be-8e65-feefc29bc4a1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922830 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-config\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922869 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9bdt\" (UniqueName: \"kubernetes.io/projected/726696e4-25c2-4664-b022-613823361a4f-kube-api-access-z9bdt\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922892 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn49g\" (UniqueName: \"kubernetes.io/projected/f0affc43-5c6d-423a-85d1-73454b3a197b-kube-api-access-dn49g\") pod \"migrator-59844c95c7-ftqrc\" (UID: \"f0affc43-5c6d-423a-85d1-73454b3a197b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922928 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922949 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bckkm\" (UniqueName: \"kubernetes.io/projected/b26fc63a-2ff5-4326-b726-52c072bed8a9-kube-api-access-bckkm\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.922986 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-client\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923008 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9d5157df-dc6c-4f18-81a8-96dc67ec7476-signing-cabundle\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923027 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/975d90c1-ba29-4bab-9d10-b971ed9a744f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923046 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44gqq\" (UniqueName: \"kubernetes.io/projected/c5366c0c-3c63-4fef-bfa4-7409c182f913-kube-api-access-44gqq\") pod 
\"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923076 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-plugins-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923113 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923174 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7b17a448-d367-4dc3-87ff-0acabb92266e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lmlfk\" (UID: \"7b17a448-d367-4dc3-87ff-0acabb92266e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923200 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923227 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f57363f6-58b9-4a98-893d-9ba2060b31c4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923254 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnjcw\" (UniqueName: \"kubernetes.io/projected/dd18f589-9ad4-4626-962c-11632f7750ec-kube-api-access-lnjcw\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7wcv\" (UID: \"dd18f589-9ad4-4626-962c-11632f7750ec\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923278 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-config-volume\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923301 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prsfg\" (UniqueName: \"kubernetes.io/projected/7b17a448-d367-4dc3-87ff-0acabb92266e-kube-api-access-prsfg\") pod \"multus-admission-controller-857f4d67dd-lmlfk\" (UID: \"7b17a448-d367-4dc3-87ff-0acabb92266e\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923324 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c5366c0c-3c63-4fef-bfa4-7409c182f913-proxy-tls\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923346 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2w75\" (UniqueName: \"kubernetes.io/projected/0ca6158f-3cfc-484b-946a-311538680135-kube-api-access-j2w75\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923369 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/975d90c1-ba29-4bab-9d10-b971ed9a744f-config\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923390 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-csi-data-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923421 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d5c4f943-ed42-40c5-9735-15b2935c7db0-proxy-tls\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923468 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-config-volume\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923495 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-config\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923517 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9d5157df-dc6c-4f18-81a8-96dc67ec7476-signing-key\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923537 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-metrics-certs\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923557 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/726696e4-25c2-4664-b022-613823361a4f-webhook-cert\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923583 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-client-ca\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923608 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-ca\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923632 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.928336 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6bcz\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-kube-api-access-x6bcz\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.928468 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/acbe8399-e94e-44d9-b2be-360e1f8231ec-profile-collector-cert\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.921172 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6"] Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.930292 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-stats-auth\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.923633 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqxl2\" (UniqueName: 
\"kubernetes.io/projected/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-kube-api-access-xqxl2\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.930594 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/acbe8399-e94e-44d9-b2be-360e1f8231ec-srv-cert\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.931450 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-config-volume\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.935641 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-config\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.936646 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65ffe8ee-bf0b-4598-a13b-f396179a9ef9-cert\") pod \"ingress-canary-57652\" (UID: \"65ffe8ee-bf0b-4598-a13b-f396179a9ef9\") " pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.943379 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbebb283-2819-40be-8e65-feefc29bc4a1-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-w2lpp\" (UID: \"fbebb283-2819-40be-8e65-feefc29bc4a1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.943523 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-metrics-certs\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.943592 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9b17177-8d45-46be-84cd-13a0613df952-service-ca-bundle\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.943611 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9d5157df-dc6c-4f18-81a8-96dc67ec7476-signing-cabundle\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.943691 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/726696e4-25c2-4664-b022-613823361a4f-apiservice-cert\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.943879 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-ca\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.944398 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-client-ca\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.944666 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-registration-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.944855 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-config\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.944830 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-metrics-tls\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.944957 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/726696e4-25c2-4664-b022-613823361a4f-tmpfs\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.945495 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae0938fa-72d0-4235-8423-6a187f5d854b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.945721 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-config\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.945907 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-serving-cert\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.946355 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.946377 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7b17a448-d367-4dc3-87ff-0acabb92266e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lmlfk\" (UID: \"7b17a448-d367-4dc3-87ff-0acabb92266e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:55 crc kubenswrapper[4800]: E1125 15:18:55.946867 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.44683095 +0000 UTC m=+97.501239432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.947328 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/726696e4-25c2-4664-b022-613823361a4f-webhook-cert\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.947452 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-plugins-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.948260 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d5c4f943-ed42-40c5-9735-15b2935c7db0-images\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.948503 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-config-volume\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.949116 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-csi-data-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.949502 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c5366c0c-3c63-4fef-bfa4-7409c182f913-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.950167 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/05140453-39c5-4248-8398-226470d13069-certs\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.950578 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/dd18f589-9ad4-4626-962c-11632f7750ec-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7wcv\" (UID: \"dd18f589-9ad4-4626-962c-11632f7750ec\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.951321 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7ssx\" (UniqueName: \"kubernetes.io/projected/f57363f6-58b9-4a98-893d-9ba2060b31c4-kube-api-access-m7ssx\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.951621 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-mountpoint-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.951743 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-mountpoint-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.951803 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jmn8\" (UniqueName: \"kubernetes.io/projected/d5c4f943-ed42-40c5-9735-15b2935c7db0-kube-api-access-7jmn8\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 
15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.951869 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xnz5\" (UniqueName: \"kubernetes.io/projected/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-kube-api-access-7xnz5\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.951984 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/975d90c1-ba29-4bab-9d10-b971ed9a744f-config\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.952110 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-service-ca\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.952156 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d5c4f943-ed42-40c5-9735-15b2935c7db0-auth-proxy-config\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.952459 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/975d90c1-ba29-4bab-9d10-b971ed9a744f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.953151 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66ef1858-faae-4481-aab0-044995d502fc-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.954270 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66ef1858-faae-4481-aab0-044995d502fc-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.954331 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-service-ca\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.954432 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9bln\" 
(UniqueName: \"kubernetes.io/projected/be8b1bce-ac58-4819-a840-8ad7652edc9d-kube-api-access-r9bln\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.954454 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66ef1858-faae-4481-aab0-044995d502fc-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.954490 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/05140453-39c5-4248-8398-226470d13069-node-bootstrap-token\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.956237 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-socket-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.956388 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/0ca6158f-3cfc-484b-946a-311538680135-socket-dir\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.958768 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c5366c0c-3c63-4fef-bfa4-7409c182f913-proxy-tls\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.958899 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9d5157df-dc6c-4f18-81a8-96dc67ec7476-signing-key\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.959670 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b26fc63a-2ff5-4326-b726-52c072bed8a9-etcd-client\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.959984 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/05140453-39c5-4248-8398-226470d13069-node-bootstrap-token\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:55 crc 
kubenswrapper[4800]: I1125 15:18:55.960108 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae0938fa-72d0-4235-8423-6a187f5d854b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.964672 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66ef1858-faae-4481-aab0-044995d502fc-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.965361 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.965872 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f57363f6-58b9-4a98-893d-9ba2060b31c4-srv-cert\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.966014 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d5c4f943-ed42-40c5-9735-15b2935c7db0-proxy-tls\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.967297 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f9b17177-8d45-46be-84cd-13a0613df952-default-certificate\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.967654 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be8b1bce-ac58-4819-a840-8ad7652edc9d-serving-cert\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.968418 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f57363f6-58b9-4a98-893d-9ba2060b31c4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.969975 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-secret-volume\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.972209 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqxl2\" (UniqueName: \"kubernetes.io/projected/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-kube-api-access-xqxl2\") pod \"route-controller-manager-6576b87f9c-2nn24\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:55 crc kubenswrapper[4800]: I1125 15:18:55.972384 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm88p\" (UniqueName: \"kubernetes.io/projected/fbebb283-2819-40be-8e65-feefc29bc4a1-kube-api-access-pm88p\") pod \"package-server-manager-789f6589d5-w2lpp\" (UID: \"fbebb283-2819-40be-8e65-feefc29bc4a1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.015327 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b26fc63a-2ff5-4326-b726-52c072bed8a9-serving-cert\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.018902 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bckkm\" (UniqueName: \"kubernetes.io/projected/b26fc63a-2ff5-4326-b726-52c072bed8a9-kube-api-access-bckkm\") pod \"etcd-operator-b45778765-9rpcj\" (UID: \"b26fc63a-2ff5-4326-b726-52c072bed8a9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.027413 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.036398 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.037500 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prsfg\" (UniqueName: \"kubernetes.io/projected/7b17a448-d367-4dc3-87ff-0acabb92266e-kube-api-access-prsfg\") pod \"multus-admission-controller-857f4d67dd-lmlfk\" (UID: \"7b17a448-d367-4dc3-87ff-0acabb92266e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.054647 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrsff\" (UniqueName: \"kubernetes.io/projected/05140453-39c5-4248-8398-226470d13069-kube-api-access-zrsff\") pod \"machine-config-server-swkh6\" (UID: \"05140453-39c5-4248-8398-226470d13069\") " pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.060470 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.060794 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.560745244 +0000 UTC m=+97.615153736 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.061099 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.061835 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.561826281 +0000 UTC m=+97.616234763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.071092 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.097012 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r4fbv"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.116401 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txbqm\" (UniqueName: \"kubernetes.io/projected/9d5157df-dc6c-4f18-81a8-96dc67ec7476-kube-api-access-txbqm\") pod \"service-ca-9c57cc56f-92c5c\" (UID: \"9d5157df-dc6c-4f18-81a8-96dc67ec7476\") " pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.117163 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h8p7\" (UniqueName: \"kubernetes.io/projected/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-kube-api-access-8h8p7\") pod \"collect-profiles-29401395-ls8lx\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.126934 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.140455 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.140647 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgjsn\" (UniqueName: \"kubernetes.io/projected/acbe8399-e94e-44d9-b2be-360e1f8231ec-kube-api-access-dgjsn\") pod \"catalog-operator-68c6474976-rpffk\" (UID: \"acbe8399-e94e-44d9-b2be-360e1f8231ec\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.141597 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xh56\" (UniqueName: \"kubernetes.io/projected/c50787f1-b3aa-49be-adc2-610beeeede6d-kube-api-access-7xh56\") pod \"marketplace-operator-79b997595-5h48t\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.150509 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.167867 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.167906 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.168422 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.668402716 +0000 UTC m=+97.722811198 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.171650 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44gqq\" (UniqueName: \"kubernetes.io/projected/c5366c0c-3c63-4fef-bfa4-7409c182f913-kube-api-access-44gqq\") pod \"machine-config-controller-84d6567774-27frf\" (UID: \"c5366c0c-3c63-4fef-bfa4-7409c182f913\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.172760 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9129efc7-1a53-404e-bca8-f26fe4aa7a7b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qdjdp\" (UID: \"9129efc7-1a53-404e-bca8-f26fe4aa7a7b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.174341 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.198052 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" event={"ID":"e9d93348-48bd-40a5-a930-11745b4ba869","Type":"ContainerStarted","Data":"56d5c7da265e1a3f5f2b82ee8071ac221c3a6e63ed06bee64e7d71799560bb32"} Nov 25 15:18:56 crc kubenswrapper[4800]: W1125 15:18:56.198350 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2446cfe_e233_473a_a837_40fe268aa5dc.slice/crio-1b43cd99597106224947e13a284df0c326e446af0e72d4e605cbfa44b8265b83 WatchSource:0}: Error finding container 1b43cd99597106224947e13a284df0c326e446af0e72d4e605cbfa44b8265b83: Status 404 returned error can't find the container with id 1b43cd99597106224947e13a284df0c326e446af0e72d4e605cbfa44b8265b83 Nov 25 15:18:56 crc 
kubenswrapper[4800]: I1125 15:18:56.200894 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kx7j\" (UniqueName: \"kubernetes.io/projected/65ffe8ee-bf0b-4598-a13b-f396179a9ef9-kube-api-access-7kx7j\") pod \"ingress-canary-57652\" (UID: \"65ffe8ee-bf0b-4598-a13b-f396179a9ef9\") " pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.220280 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" event={"ID":"a3f6160b-061b-4d7b-beac-5873f6c0192c","Type":"ContainerStarted","Data":"3ff877ab65c310f5d4238638279cdd3d2867f1437dce9cf0f34ef3dcd6bf4a4f"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.220669 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-swkh6" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.230762 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/975d90c1-ba29-4bab-9d10-b971ed9a744f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-662mz\" (UID: \"975d90c1-ba29-4bab-9d10-b971ed9a744f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.244346 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" event={"ID":"56f1dabd-4d11-4dc4-9961-efac4124e4a5","Type":"ContainerStarted","Data":"29b38abc898e9f9f72bde729ce2cbd6637d67e7c62298b21b52ce2691eaaf8ce"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.247741 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd5fx\" (UniqueName: \"kubernetes.io/projected/66ef1858-faae-4481-aab0-044995d502fc-kube-api-access-pd5fx\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9cw4\" (UID: \"66ef1858-faae-4481-aab0-044995d502fc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.253004 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" event={"ID":"4c143db6-2d6b-49bd-987b-a3fbacb8a562","Type":"ContainerStarted","Data":"dac7ae28bc9a14927b95905fda7eefb4b3bbbd474faec2966f2c0149e898b86c"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.253994 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.264497 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q5x2z" event={"ID":"6f015c93-38f5-4f11-9f72-6d99259e4058","Type":"ContainerStarted","Data":"ea2bcd0baf124a2783c9e944b0c1a6d08839fdc69e8094bcdda7fbe82e8cdc3c"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.264555 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q5x2z" event={"ID":"6f015c93-38f5-4f11-9f72-6d99259e4058","Type":"ContainerStarted","Data":"9d960793b2374b6cc2c53659e4cd67e87ff9a062d36c5e7a2ed978dfe6f1b54e"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.268701 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/ae0938fa-72d0-4235-8423-6a187f5d854b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fpfh8\" (UID: \"ae0938fa-72d0-4235-8423-6a187f5d854b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.268868 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-q5x2z" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.268989 4800 patch_prober.go:28] interesting pod/downloads-7954f5f757-q5x2z container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.269053 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q5x2z" podUID="6f015c93-38f5-4f11-9f72-6d99259e4058" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.272003 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.272329 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.772315876 +0000 UTC m=+97.826724348 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.276957 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmsb7\" (UniqueName: \"kubernetes.io/projected/f9b17177-8d45-46be-84cd-13a0613df952-kube-api-access-nmsb7\") pod \"router-default-5444994796-t52ch\" (UID: \"f9b17177-8d45-46be-84cd-13a0613df952\") " pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.279747 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" event={"ID":"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391","Type":"ContainerStarted","Data":"3cde0329cf5802c3d93473aad46a3f36db5d5c881b472420c90e62be47943d42"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.291278 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" event={"ID":"cc06d61c-999f-4431-90a4-1fb72e759925","Type":"ContainerStarted","Data":"c9f73c13f46b1ae731346b502ccb90ad625d71eeb72e111437b753da91cad888"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.291339 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" event={"ID":"cc06d61c-999f-4431-90a4-1fb72e759925","Type":"ContainerStarted","Data":"c1b97e845621cc32459a598f3fd39cf672f0fe9605bdf38922ec39f71526c764"} Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.299867 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn49g\" (UniqueName: \"kubernetes.io/projected/f0affc43-5c6d-423a-85d1-73454b3a197b-kube-api-access-dn49g\") pod \"migrator-59844c95c7-ftqrc\" (UID: \"f0affc43-5c6d-423a-85d1-73454b3a197b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.302741 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.316780 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnjcw\" (UniqueName: \"kubernetes.io/projected/dd18f589-9ad4-4626-962c-11632f7750ec-kube-api-access-lnjcw\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7wcv\" (UID: \"dd18f589-9ad4-4626-962c-11632f7750ec\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.320160 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.330081 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mbjjh"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.335477 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9bdt\" (UniqueName: \"kubernetes.io/projected/726696e4-25c2-4664-b022-613823361a4f-kube-api-access-z9bdt\") pod \"packageserver-d55dfcdfc-zk27z\" (UID: \"726696e4-25c2-4664-b022-613823361a4f\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.335591 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rfhb2"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.343700 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.345124 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.355871 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mtjz"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.358241 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.362268 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.364261 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2w75\" (UniqueName: \"kubernetes.io/projected/0ca6158f-3cfc-484b-946a-311538680135-kube-api-access-j2w75\") pod \"csi-hostpathplugin-xmp86\" (UID: \"0ca6158f-3cfc-484b-946a-311538680135\") " pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.381178 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.383554 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7ssx\" (UniqueName: \"kubernetes.io/projected/f57363f6-58b9-4a98-893d-9ba2060b31c4-kube-api-access-m7ssx\") pod \"olm-operator-6b444d44fb-22v7m\" (UID: \"f57363f6-58b9-4a98-893d-9ba2060b31c4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.385212 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.385717 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.386567 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.886539897 +0000 UTC m=+97.940948379 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.386692 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.389382 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.889361226 +0000 UTC m=+97.943769708 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.390073 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.401575 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.408745 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.418205 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.427235 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.431013 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xnz5\" (UniqueName: \"kubernetes.io/projected/4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5-kube-api-access-7xnz5\") pod \"dns-default-gn26w\" (UID: \"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5\") " pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.431077 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.433674 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9bln\" (UniqueName: \"kubernetes.io/projected/be8b1bce-ac58-4819-a840-8ad7652edc9d-kube-api-access-r9bln\") pod \"service-ca-operator-777779d784-8stkc\" (UID: \"be8b1bce-ac58-4819-a840-8ad7652edc9d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.433702 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jmn8\" (UniqueName: \"kubernetes.io/projected/d5c4f943-ed42-40c5-9735-15b2935c7db0-kube-api-access-7jmn8\") pod \"machine-config-operator-74547568cd-4s25t\" (UID: \"d5c4f943-ed42-40c5-9735-15b2935c7db0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:56 crc kubenswrapper[4800]: W1125 15:18:56.444702 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0bea6317_cfba_4cbb_8dc8_d8c0d55ebb42.slice/crio-eebd34b7fe574c215a6686a766c9279badb3bfba1a760b6c60ecb15ed03cfaaa WatchSource:0}: Error finding container eebd34b7fe574c215a6686a766c9279badb3bfba1a760b6c60ecb15ed03cfaaa: Status 404 returned error can't find the container with id eebd34b7fe574c215a6686a766c9279badb3bfba1a760b6c60ecb15ed03cfaaa Nov 25 15:18:56 crc kubenswrapper[4800]: W1125 15:18:56.447113 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4bf0d95_3d4d_468d_9d0b_2c2fdc6f2feb.slice/crio-7a308d085c48013f35ed323af8f94c337a9303f273eb3cb0b04f271446b83722 WatchSource:0}: Error finding container 7a308d085c48013f35ed323af8f94c337a9303f273eb3cb0b04f271446b83722: Status 404 returned error can't find the container with id 7a308d085c48013f35ed323af8f94c337a9303f273eb3cb0b04f271446b83722 Nov 25 15:18:56 crc kubenswrapper[4800]: W1125 15:18:56.451086 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0329df6e_d8e5_4b22_be13_f934904b0ae7.slice/crio-a126490be230a79e73fd8e7940b66c19485e6f92b93215c8674208f1119c0566 WatchSource:0}: Error finding container a126490be230a79e73fd8e7940b66c19485e6f92b93215c8674208f1119c0566: Status 404 returned error can't find the container with id a126490be230a79e73fd8e7940b66c19485e6f92b93215c8674208f1119c0566 Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.455817 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.473035 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.481529 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-57652" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.498977 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.499958 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:56.999920128 +0000 UTC m=+98.054328610 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.500230 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.500815 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.000795769 +0000 UTC m=+98.055204251 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.503484 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.530704 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gn26w" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.594309 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-sx8kw"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.601366 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.608359 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.108299517 +0000 UTC m=+98.162707999 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.609293 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.609951 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.109937337 +0000 UTC m=+98.164345819 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.633019 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9rpcj"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.671481 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.710646 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.711169 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.211148801 +0000 UTC m=+98.265557273 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.746457 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24"] Nov 25 15:18:56 crc kubenswrapper[4800]: W1125 15:18:56.772198 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81f88e63_467c_4356_bb2b_b5aa9d93f512.slice/crio-a74f3e9b4c0f99ef2169b20aacbd15d2cf75b03e8f1ddc2c8f9bfb41b5a2dc49 WatchSource:0}: Error finding container a74f3e9b4c0f99ef2169b20aacbd15d2cf75b03e8f1ddc2c8f9bfb41b5a2dc49: Status 404 returned error can't find the container with id a74f3e9b4c0f99ef2169b20aacbd15d2cf75b03e8f1ddc2c8f9bfb41b5a2dc49 Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.786698 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-gjqqh"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.813209 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.813863 4800 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.313817142 +0000 UTC m=+98.368225624 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.826214 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-q5x2z" podStartSLOduration=74.826188953 podStartE2EDuration="1m14.826188953s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:56.824746348 +0000 UTC m=+97.879154850" watchObservedRunningTime="2025-11-25 15:18:56.826188953 +0000 UTC m=+97.880597435" Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.845901 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.887858 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.910796 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lmlfk"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.918326 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.918828 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.418705356 +0000 UTC m=+98.473113848 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.918904 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:56 crc kubenswrapper[4800]: E1125 15:18:56.920012 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.419467424 +0000 UTC m=+98.473875906 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.924164 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5h48t"] Nov 25 15:18:56 crc kubenswrapper[4800]: I1125 15:18:56.992795 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m"] Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.021192 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.021645 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.521626392 +0000 UTC m=+98.576034874 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.081060 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8"] Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.124489 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.125932 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.625914851 +0000 UTC m=+98.680323333 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: W1125 15:18:57.201639 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1fe3fca3_1127_4a40_bf8e_bb6f2cd7aa40.slice/crio-13c7b321fdf8414f96b1a12802ba024ce4a78db6affcf7b31d08aeaa4aacb319 WatchSource:0}: Error finding container 13c7b321fdf8414f96b1a12802ba024ce4a78db6affcf7b31d08aeaa4aacb319: Status 404 returned error can't find the container with id 13c7b321fdf8414f96b1a12802ba024ce4a78db6affcf7b31d08aeaa4aacb319 Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.222679 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-92c5c"] Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.225242 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.225619 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.725603359 +0000 UTC m=+98.780011841 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.304805 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z"] Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.331746 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.332318 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.832299547 +0000 UTC m=+98.886708029 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.333755 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" event={"ID":"c50787f1-b3aa-49be-adc2-610beeeede6d","Type":"ContainerStarted","Data":"99d6a6b9bfaa8329082bf54417f9833b10469a9c3f0d1c9c3066056c769a85a2"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.335357 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-27frf"] Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.337112 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" event={"ID":"ae0938fa-72d0-4235-8423-6a187f5d854b","Type":"ContainerStarted","Data":"13aa7e56b8f97259bc59dea5db8a6861ec2aed7c7e79b7049f1c4fb6065d8bee"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.344187 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-swkh6" event={"ID":"05140453-39c5-4248-8398-226470d13069","Type":"ContainerStarted","Data":"564904cee2991db426a135df8f35b0890439485aac81375dccb21cbf8c9c3857"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.348672 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" event={"ID":"7b17a448-d367-4dc3-87ff-0acabb92266e","Type":"ContainerStarted","Data":"8762e34afb12ffc7b6c21bb82e8845bd4a48d67e52e4b4b3a500f721d5738bd4"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.353554 4800 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" event={"ID":"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb","Type":"ContainerStarted","Data":"7a308d085c48013f35ed323af8f94c337a9303f273eb3cb0b04f271446b83722"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.355269 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" event={"ID":"fbebb283-2819-40be-8e65-feefc29bc4a1","Type":"ContainerStarted","Data":"f51f82f8f5cc7f4ba6c11b0b54a1b82b4507e24a3eab1906c9ce806f7c7a6b25"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.363195 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" event={"ID":"b2446cfe-e233-473a-a837-40fe268aa5dc","Type":"ContainerStarted","Data":"e8dac20f5915f37db9487c0d4bb7362c90f1b639afb0e8751a45d9559eff9307"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.363270 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" event={"ID":"b2446cfe-e233-473a-a837-40fe268aa5dc","Type":"ContainerStarted","Data":"1b43cd99597106224947e13a284df0c326e446af0e72d4e605cbfa44b8265b83"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.399509 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" event={"ID":"e9d93348-48bd-40a5-a930-11745b4ba869","Type":"ContainerStarted","Data":"4a1435066da0aa3f47d9d01dd1635594b772ff2fa67178b0463532f51ea4eeaa"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.400760 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.420020 4800 patch_prober.go:28] interesting pod/console-operator-58897d9998-bm2fk container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.420082 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" podUID="e9d93348-48bd-40a5-a930-11745b4ba869" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.435571 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.436058 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:57.936042284 +0000 UTC m=+98.990450766 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.453288 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" event={"ID":"0329df6e-d8e5-4b22-be13-f934904b0ae7","Type":"ContainerStarted","Data":"8bc0c07e9d1ec307ea3561738b2ff97359b5534cacc9975b97b8cdbe9ee30253"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.453768 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" event={"ID":"0329df6e-d8e5-4b22-be13-f934904b0ae7","Type":"ContainerStarted","Data":"a126490be230a79e73fd8e7940b66c19485e6f92b93215c8674208f1119c0566"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.457321 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" event={"ID":"b26fc63a-2ff5-4326-b726-52c072bed8a9","Type":"ContainerStarted","Data":"1b178d99dc9987990bcd32efcbbc24364d9eb1d846e8fe508cb59b737e0e5162"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.460980 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" event={"ID":"f57363f6-58b9-4a98-893d-9ba2060b31c4","Type":"ContainerStarted","Data":"4e5b6a9c4de8ba1cbd2e8c6a8a46b53e29da53ea60190d686624cacdde81c2df"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.463356 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" event={"ID":"a451496e-aec1-4381-916e-d9875d29dbd2","Type":"ContainerStarted","Data":"31158e609e6323d8734225a5bd8b57d5d3112931c03c2b97f68465a9de80dcc7"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.478627 4800 generic.go:334] "Generic (PLEG): container finished" podID="a3f6160b-061b-4d7b-beac-5873f6c0192c" containerID="d11fd6ef0c62a5418315dd5913395543486592cb765d4c874680dfaaab22a24c" exitCode=0 Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.478788 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" event={"ID":"a3f6160b-061b-4d7b-beac-5873f6c0192c","Type":"ContainerDied","Data":"d11fd6ef0c62a5418315dd5913395543486592cb765d4c874680dfaaab22a24c"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.492070 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" event={"ID":"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391","Type":"ContainerStarted","Data":"ced9f08f48869ea94ff0fe636c4e3d482046f71cf3062289b0bd81fcda7a0e21"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.498842 4800 generic.go:334] "Generic (PLEG): container finished" podID="cc06d61c-999f-4431-90a4-1fb72e759925" containerID="c9f73c13f46b1ae731346b502ccb90ad625d71eeb72e111437b753da91cad888" exitCode=0 Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.499404 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" 
event={"ID":"cc06d61c-999f-4431-90a4-1fb72e759925","Type":"ContainerDied","Data":"c9f73c13f46b1ae731346b502ccb90ad625d71eeb72e111437b753da91cad888"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.501922 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" event={"ID":"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40","Type":"ContainerStarted","Data":"13c7b321fdf8414f96b1a12802ba024ce4a78db6affcf7b31d08aeaa4aacb319"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.504886 4800 generic.go:334] "Generic (PLEG): container finished" podID="4c143db6-2d6b-49bd-987b-a3fbacb8a562" containerID="277a9d52f687849824f56c76349786a5d98bf36375862a542ba213681ae0819c" exitCode=0 Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.504969 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" event={"ID":"4c143db6-2d6b-49bd-987b-a3fbacb8a562","Type":"ContainerDied","Data":"277a9d52f687849824f56c76349786a5d98bf36375862a542ba213681ae0819c"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.507552 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" event={"ID":"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960","Type":"ContainerStarted","Data":"ce70bf90006edd41b4dd58d337bc75659c81293dffd4e892e0223efec413d10a"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.509595 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" event={"ID":"b228d866-5740-4a89-82b3-53c6272a70cc","Type":"ContainerStarted","Data":"fc5c2e895a9fe75e1bb170317ea6949a1c1e5ad3ec12cbb4222796c829f10c26"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.515732 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" event={"ID":"09fb6a8e-92e7-4f23-8d16-6b8616759965","Type":"ContainerStarted","Data":"64c802f42273c44eee95ba7135a9e3c8f6fcb34b0f5fbe6d94ef710edfc241e9"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.515791 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" event={"ID":"09fb6a8e-92e7-4f23-8d16-6b8616759965","Type":"ContainerStarted","Data":"8956e38fa7eb1dcd740621c5a908a4da612a6e2a19b59836b5343e9970861a64"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.521563 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" event={"ID":"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42","Type":"ContainerStarted","Data":"eebd34b7fe574c215a6686a766c9279badb3bfba1a760b6c60ecb15ed03cfaaa"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.523595 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" event={"ID":"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b","Type":"ContainerStarted","Data":"f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.523623 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" event={"ID":"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b","Type":"ContainerStarted","Data":"93cd5b16ed5477ce78133735bd4abf67dc9ae219d4fe51f001799d5bcf8b9832"} Nov 25 15:18:57 crc 
kubenswrapper[4800]: I1125 15:18:57.524021 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.525284 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sx8kw" event={"ID":"81f88e63-467c-4356-bb2b-b5aa9d93f512","Type":"ContainerStarted","Data":"a74f3e9b4c0f99ef2169b20aacbd15d2cf75b03e8f1ddc2c8f9bfb41b5a2dc49"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.526370 4800 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5mtjz container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.526450 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" podUID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.529394 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" event={"ID":"35aecb5c-7326-4273-9cd4-0820f1ee32b4","Type":"ContainerStarted","Data":"d64ae5b3dd680f87647f9f88eb33ff2d7d16b89e9198529e3d9c4f37a2f3c658"} Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.529923 4800 patch_prober.go:28] interesting pod/downloads-7954f5f757-q5x2z container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.530002 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q5x2z" podUID="6f015c93-38f5-4f11-9f72-6d99259e4058" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.537689 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.545882 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.045860657 +0000 UTC m=+99.100269139 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.640328 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.642533 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.142513242 +0000 UTC m=+99.196921724 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.749009 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.749608 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.249555698 +0000 UTC m=+99.303964180 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.771801 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp"] Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.850898 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.851039 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.351020888 +0000 UTC m=+99.405429370 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.851575 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.851986 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.351972832 +0000 UTC m=+99.406381314 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.946158 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc"] Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.955694 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.955820 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.4557941 +0000 UTC m=+99.510202582 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.956149 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:57 crc kubenswrapper[4800]: I1125 15:18:57.959023 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4"] Nov 25 15:18:57 crc kubenswrapper[4800]: E1125 15:18:57.959741 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.459718896 +0000 UTC m=+99.514127378 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.020821 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv"] Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.061647 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.061835 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.561807132 +0000 UTC m=+99.616215614 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.062198 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.065258 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.565217535 +0000 UTC m=+99.619626017 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.091445 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-57652"] Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.100792 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" podStartSLOduration=76.10076205 podStartE2EDuration="1m16.10076205s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.089367612 +0000 UTC m=+99.143776114" watchObservedRunningTime="2025-11-25 15:18:58.10076205 +0000 UTC m=+99.155170542" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.102290 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-gn26w"] Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.125887 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk"] Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.126774 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7njxz" podStartSLOduration=76.126758803 podStartE2EDuration="1m16.126758803s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.121665929 +0000 UTC m=+99.176074401" watchObservedRunningTime="2025-11-25 15:18:58.126758803 +0000 UTC m=+99.181167285" Nov 25 15:18:58 crc kubenswrapper[4800]: W1125 15:18:58.158223 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd18f589_9ad4_4626_962c_11632f7750ec.slice/crio-72f60156b154f533ad6384e27615998ee3b78f9be402c93f7bb01dc6ea256362 WatchSource:0}: Error finding container 72f60156b154f533ad6384e27615998ee3b78f9be402c93f7bb01dc6ea256362: Status 404 returned error can't find the container with id 72f60156b154f533ad6384e27615998ee3b78f9be402c93f7bb01dc6ea256362 Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.162513 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8stkc"] Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.163474 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" podStartSLOduration=76.163446077 podStartE2EDuration="1m16.163446077s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.162280528 +0000 UTC m=+99.216689030" watchObservedRunningTime="2025-11-25 15:18:58.163446077 +0000 UTC m=+99.217854559" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 
15:18:58.164896 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.165321 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.665300922 +0000 UTC m=+99.719709404 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.184459 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xmp86"] Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.190783 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t"] Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.206350 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz"] Nov 25 15:18:58 crc kubenswrapper[4800]: W1125 15:18:58.245801 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podacbe8399_e94e_44d9_b2be_360e1f8231ec.slice/crio-a02a8c4b5ba153e49fe8f65a16ad430ea1dd6e20f0a8b1e768f25db3d84d7cdf WatchSource:0}: Error finding container a02a8c4b5ba153e49fe8f65a16ad430ea1dd6e20f0a8b1e768f25db3d84d7cdf: Status 404 returned error can't find the container with id a02a8c4b5ba153e49fe8f65a16ad430ea1dd6e20f0a8b1e768f25db3d84d7cdf Nov 25 15:18:58 crc kubenswrapper[4800]: W1125 15:18:58.267365 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ee4c9e0_ba5e_4f6d_92d3_7bc302d34ae5.slice/crio-707a9c408151c98187cb06b2ec99d3ea5c9993bdded650275934c0894605acc1 WatchSource:0}: Error finding container 707a9c408151c98187cb06b2ec99d3ea5c9993bdded650275934c0894605acc1: Status 404 returned error can't find the container with id 707a9c408151c98187cb06b2ec99d3ea5c9993bdded650275934c0894605acc1 Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.270169 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.270694 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 15:18:58.770678977 +0000 UTC m=+99.825087459 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: W1125 15:18:58.299717 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe8b1bce_ac58_4819_a840_8ad7652edc9d.slice/crio-745019baeb9397b6657ef63f42c12d4149ac4ef00df177b21c3789a3cb7680c1 WatchSource:0}: Error finding container 745019baeb9397b6657ef63f42c12d4149ac4ef00df177b21c3789a3cb7680c1: Status 404 returned error can't find the container with id 745019baeb9397b6657ef63f42c12d4149ac4ef00df177b21c3789a3cb7680c1 Nov 25 15:18:58 crc kubenswrapper[4800]: W1125 15:18:58.304966 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ca6158f_3cfc_484b_946a_311538680135.slice/crio-aa473ec8f5e017ccd987c8392b51c78e68f178c7f2de841b0eb0c6a08d91c66c WatchSource:0}: Error finding container aa473ec8f5e017ccd987c8392b51c78e68f178c7f2de841b0eb0c6a08d91c66c: Status 404 returned error can't find the container with id aa473ec8f5e017ccd987c8392b51c78e68f178c7f2de841b0eb0c6a08d91c66c Nov 25 15:18:58 crc kubenswrapper[4800]: W1125 15:18:58.308693 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5c4f943_ed42_40c5_9735_15b2935c7db0.slice/crio-becabfa7e4478cb10da02d99a6ffc37eb4246b258283715d92d4a9d9783b2a16 WatchSource:0}: Error finding container becabfa7e4478cb10da02d99a6ffc37eb4246b258283715d92d4a9d9783b2a16: Status 404 returned error can't find the container with id becabfa7e4478cb10da02d99a6ffc37eb4246b258283715d92d4a9d9783b2a16 Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.372912 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.374131 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.874088056 +0000 UTC m=+99.928496538 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.475145 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.475669 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:58.975643438 +0000 UTC m=+100.030051920 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.535962 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" event={"ID":"66ef1858-faae-4481-aab0-044995d502fc","Type":"ContainerStarted","Data":"00563ba169bac72731caccb354edcbf13f6784d2019f89b974a2c34ba925e6ea"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.536790 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gn26w" event={"ID":"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5","Type":"ContainerStarted","Data":"707a9c408151c98187cb06b2ec99d3ea5c9993bdded650275934c0894605acc1"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.538150 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" event={"ID":"975d90c1-ba29-4bab-9d10-b971ed9a744f","Type":"ContainerStarted","Data":"ad69760d95db864c341c975488036156b385456912dadf99c0dfd35696add3b0"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.539238 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" event={"ID":"acbe8399-e94e-44d9-b2be-360e1f8231ec","Type":"ContainerStarted","Data":"a02a8c4b5ba153e49fe8f65a16ad430ea1dd6e20f0a8b1e768f25db3d84d7cdf"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.540770 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" event={"ID":"d5c4f943-ed42-40c5-9735-15b2935c7db0","Type":"ContainerStarted","Data":"becabfa7e4478cb10da02d99a6ffc37eb4246b258283715d92d4a9d9783b2a16"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.542238 4800 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" event={"ID":"726696e4-25c2-4664-b022-613823361a4f","Type":"ContainerStarted","Data":"1bff6f45cea14eba696a40877e2a8fd143f110f630659d8419c3de401c43714e"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.544893 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" event={"ID":"0ca6158f-3cfc-484b-946a-311538680135","Type":"ContainerStarted","Data":"aa473ec8f5e017ccd987c8392b51c78e68f178c7f2de841b0eb0c6a08d91c66c"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.546203 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" event={"ID":"9129efc7-1a53-404e-bca8-f26fe4aa7a7b","Type":"ContainerStarted","Data":"07ec4f8a3d2a8d96f0ebce3aa4ed41088ac53e211c60581699c0a5b7ac697c6e"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.548599 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" event={"ID":"b26fc63a-2ff5-4326-b726-52c072bed8a9","Type":"ContainerStarted","Data":"7173f01440242826ba2282665df60d131e528b71e6a85e0519739314d150a4c6"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.553425 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" event={"ID":"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb","Type":"ContainerStarted","Data":"8ff91998caea6237ddeb2d8663f62a0e3472dd2e6a669cee3b9cd0a995a23c63"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.555311 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" event={"ID":"fbebb283-2819-40be-8e65-feefc29bc4a1","Type":"ContainerStarted","Data":"f0677f45738fec7cc8cac7d95b62285ae272bfa849c691ed163541bc5f9aa184"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.558128 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" event={"ID":"56f1dabd-4d11-4dc4-9961-efac4124e4a5","Type":"ContainerStarted","Data":"5841d34da310770db993b72b381be6d55f0cda9de946e28d02ba16e49149e8b4"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.558539 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.559765 4800 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-r4fbv container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.559835 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" podUID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.563755 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" 
event={"ID":"be8b1bce-ac58-4819-a840-8ad7652edc9d","Type":"ContainerStarted","Data":"745019baeb9397b6657ef63f42c12d4149ac4ef00df177b21c3789a3cb7680c1"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.571132 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" event={"ID":"4c143db6-2d6b-49bd-987b-a3fbacb8a562","Type":"ContainerStarted","Data":"c6312843b50e48e1612a86b1b0fa595960c41c19e785487a0cf55e819f48aefd"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.574038 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" event={"ID":"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960","Type":"ContainerStarted","Data":"6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.574403 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.575669 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-57652" event={"ID":"65ffe8ee-bf0b-4598-a13b-f396179a9ef9","Type":"ContainerStarted","Data":"9c00f538968f70a557e38ae944ba5c84df8c3e6818e127b3cec0567bfc371b9e"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.576596 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.577192 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.077169221 +0000 UTC m=+100.131577703 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.577339 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.577690 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.077683224 +0000 UTC m=+100.132091706 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.578301 4800 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2nn24 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.578353 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" podUID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.582736 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" event={"ID":"0bea6317-cfba-4cbb-8dc8-d8c0d55ebb42","Type":"ContainerStarted","Data":"70e8d16e1b001b157435773ceaf2ff40fdc8eac3ffc7e52ae5a6f246b7587e42"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.586976 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bfj8f" podStartSLOduration=76.586960719 podStartE2EDuration="1m16.586960719s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.270004381 +0000 UTC m=+99.324412873" watchObservedRunningTime="2025-11-25 15:18:58.586960719 +0000 UTC m=+99.641369221" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.588001 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" podStartSLOduration=76.587991014 podStartE2EDuration="1m16.587991014s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.586243772 +0000 UTC m=+99.640652254" watchObservedRunningTime="2025-11-25 15:18:58.587991014 +0000 UTC m=+99.642399506" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.595127 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" event={"ID":"cc06d61c-999f-4431-90a4-1fb72e759925","Type":"ContainerStarted","Data":"18b491103c29e2e948c2e91c03953b5d3de9f3fbb94d4a071aa3a0e5a67c2e5e"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.601956 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" event={"ID":"f0affc43-5c6d-423a-85d1-73454b3a197b","Type":"ContainerStarted","Data":"a08d4d6fc6eb22361a3b5919aec2ec3877effdb4f89bd19fdb7e633e0dfc5b08"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.603068 4800 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-t52ch" event={"ID":"f9b17177-8d45-46be-84cd-13a0613df952","Type":"ContainerStarted","Data":"b5bad1b2d64d4134948249469c5afb65d8a14e38e44548c0bfb8c67e30d40268"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.604198 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" event={"ID":"9d5157df-dc6c-4f18-81a8-96dc67ec7476","Type":"ContainerStarted","Data":"6e6ede261624b56109bdca77746d415044e78f0e587c5ee0a417b77e9e856276"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.605873 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" event={"ID":"b228d866-5740-4a89-82b3-53c6272a70cc","Type":"ContainerStarted","Data":"5018762aeb6f4f72160c453298fb6482e38c0e9764f2e7db90b7fad66fc1cd20"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.607339 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" podStartSLOduration=75.607292014 podStartE2EDuration="1m15.607292014s" podCreationTimestamp="2025-11-25 15:17:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.606926945 +0000 UTC m=+99.661335437" watchObservedRunningTime="2025-11-25 15:18:58.607292014 +0000 UTC m=+99.661700486" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.612174 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" event={"ID":"21056d5d-5bd1-4ab2-9f9c-8c6cb6212391","Type":"ContainerStarted","Data":"5bb29927d7912a219f4d17080766d65ba93b10bb4145f99f64dd19658a43b8b1"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.616099 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" event={"ID":"dd18f589-9ad4-4626-962c-11632f7750ec","Type":"ContainerStarted","Data":"72f60156b154f533ad6384e27615998ee3b78f9be402c93f7bb01dc6ea256362"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.620498 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" event={"ID":"c5366c0c-3c63-4fef-bfa4-7409c182f913","Type":"ContainerStarted","Data":"2a63c346ec7be78710d6c7f9e5f054bdbef379963fc123c9b08b4755f6d4aab0"} Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.621292 4800 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5mtjz container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.621396 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" podUID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.621442 4800 patch_prober.go:28] interesting pod/downloads-7954f5f757-q5x2z container/download-server namespace/openshift-console: Readiness probe 
status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.621498 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q5x2z" podUID="6f015c93-38f5-4f11-9f72-6d99259e4058" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.628310 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbjjh" podStartSLOduration=76.628281705 podStartE2EDuration="1m16.628281705s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.625288683 +0000 UTC m=+99.679697165" watchObservedRunningTime="2025-11-25 15:18:58.628281705 +0000 UTC m=+99.682690187" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.635424 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-bm2fk" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.664362 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hc9bz" podStartSLOduration=78.664334674 podStartE2EDuration="1m18.664334674s" podCreationTimestamp="2025-11-25 15:17:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.662487568 +0000 UTC m=+99.716896040" watchObservedRunningTime="2025-11-25 15:18:58.664334674 +0000 UTC m=+99.718743156" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.678947 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.680021 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.179996714 +0000 UTC m=+100.234405196 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.685228 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6prpd" podStartSLOduration=76.685204671 podStartE2EDuration="1m16.685204671s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:58.682309921 +0000 UTC m=+99.736718413" watchObservedRunningTime="2025-11-25 15:18:58.685204671 +0000 UTC m=+99.739613153" Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.784567 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.786453 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.286435476 +0000 UTC m=+100.340844158 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.886940 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.887124 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.387089547 +0000 UTC m=+100.441498029 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.887415 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.887854 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.387830815 +0000 UTC m=+100.442239287 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.989754 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.990048 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.489994513 +0000 UTC m=+100.544403015 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:58 crc kubenswrapper[4800]: I1125 15:18:58.991252 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:58 crc kubenswrapper[4800]: E1125 15:18:58.991681 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.491670495 +0000 UTC m=+100.546078977 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.092220 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.092397 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.592369886 +0000 UTC m=+100.646778378 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.092574 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.093015 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.593004692 +0000 UTC m=+100.647413184 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.193041 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.193262 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.693221142 +0000 UTC m=+100.747629624 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.305033 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.309194 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.809169536 +0000 UTC m=+100.863578018 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.407626 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.408094 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:18:59.908022342 +0000 UTC m=+100.962430824 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.509013 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.509453 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.009430692 +0000 UTC m=+101.063839174 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.610510 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.611013 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.110967065 +0000 UTC m=+101.165375547 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.653466 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-swkh6" event={"ID":"05140453-39c5-4248-8398-226470d13069","Type":"ContainerStarted","Data":"2a2fe04552d69dcba3d589d053ca2a5bdf1b2640651cb0b2ceb7542c0d3f5171"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.659542 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" event={"ID":"c50787f1-b3aa-49be-adc2-610beeeede6d","Type":"ContainerStarted","Data":"ec5f464cf9cb45a8afe51ceba8d7bab6a53bbc928fd8e3e2911062e029bd7820"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.660822 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.662645 4800 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5h48t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.662720 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.669234 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" event={"ID":"d5c4f943-ed42-40c5-9735-15b2935c7db0","Type":"ContainerStarted","Data":"20d98a65a728102a937cc667fd60a314579f61af8fb30ac3f28aa2ae4e426009"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.680484 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" event={"ID":"a3f6160b-061b-4d7b-beac-5873f6c0192c","Type":"ContainerStarted","Data":"901f2e3d4aae57239006888e242070425826bf81c65996b3cb0e37d0a17aec7c"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.685180 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" event={"ID":"c5366c0c-3c63-4fef-bfa4-7409c182f913","Type":"ContainerStarted","Data":"b99e476859a46729fe0361974a52dbfa4a118aacd4bd89ea89579f601dfa8dbc"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.685257 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" event={"ID":"c5366c0c-3c63-4fef-bfa4-7409c182f913","Type":"ContainerStarted","Data":"7c64a3f9c62bf5871e68b5e713a44b838ded5d8ec9e7d79a9a2e343a2250f1a2"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.689930 4800 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-swkh6" podStartSLOduration=6.689904187 podStartE2EDuration="6.689904187s" podCreationTimestamp="2025-11-25 15:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.689302952 +0000 UTC m=+100.743711434" watchObservedRunningTime="2025-11-25 15:18:59.689904187 +0000 UTC m=+100.744312669" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.712561 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.717670 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.217651922 +0000 UTC m=+101.272060404 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.722874 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" event={"ID":"7b17a448-d367-4dc3-87ff-0acabb92266e","Type":"ContainerStarted","Data":"4c3c1d4064e6cc75deb9574635a95867d7287470ede810dec4e8e7db9f281cb4"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.722935 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" event={"ID":"7b17a448-d367-4dc3-87ff-0acabb92266e","Type":"ContainerStarted","Data":"3307334056ea639017c20422b407641a2435b4f23fdde438cb1e0ea86c12528f"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.732384 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" podStartSLOduration=76.73236476 podStartE2EDuration="1m16.73236476s" podCreationTimestamp="2025-11-25 15:17:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.727404349 +0000 UTC m=+100.781812831" watchObservedRunningTime="2025-11-25 15:18:59.73236476 +0000 UTC m=+100.786773242" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.745866 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" event={"ID":"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40","Type":"ContainerStarted","Data":"eb8ce379ff209a0acdbb0a8617d0786ea508d29d36a27ee7ac9c8a1e4baac375"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.751199 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podStartSLOduration=76.751186749 podStartE2EDuration="1m16.751186749s" podCreationTimestamp="2025-11-25 15:17:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.749985699 +0000 UTC m=+100.804394181" watchObservedRunningTime="2025-11-25 15:18:59.751186749 +0000 UTC m=+100.805595231" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.764111 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" event={"ID":"b4bf0d95-3d4d-468d-9d0b-2c2fdc6f2feb","Type":"ContainerStarted","Data":"ae1cef7385cc2921885da27c9bb7bfeb11dce78c098b2e5aed02eff0a0a512da"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.773507 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" event={"ID":"dd18f589-9ad4-4626-962c-11632f7750ec","Type":"ContainerStarted","Data":"b355632e1447bdb18994ec8b2f19b575c363333cbffd46b86dfabf2d89c684b0"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.797663 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gn26w" event={"ID":"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5","Type":"ContainerStarted","Data":"e506d41332201090055880ff3aac375b21982cf84370ac6f87159dba6db880f6"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.804570 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-27frf" podStartSLOduration=77.804554258 podStartE2EDuration="1m17.804554258s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.779302013 +0000 UTC m=+100.833710495" watchObservedRunningTime="2025-11-25 15:18:59.804554258 +0000 UTC m=+100.858962740" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.813810 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.815205 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.315182146 +0000 UTC m=+101.369590628 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.817916 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sx8kw" event={"ID":"81f88e63-467c-4356-bb2b-b5aa9d93f512","Type":"ContainerStarted","Data":"c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.827591 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" event={"ID":"acbe8399-e94e-44d9-b2be-360e1f8231ec","Type":"ContainerStarted","Data":"8b582ab8cc8a5a5dd1ea5c3ec257da12e53111b65881092500d02183b81c0592"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.830170 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.832634 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7wcv" podStartSLOduration=77.832621192 podStartE2EDuration="1m17.832621192s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.805791018 +0000 UTC m=+100.860199500" watchObservedRunningTime="2025-11-25 15:18:59.832621192 +0000 UTC m=+100.887029674" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.833663 4800 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-rpffk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.833667 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" podStartSLOduration=78.833662127 podStartE2EDuration="1m18.833662127s" podCreationTimestamp="2025-11-25 15:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.830979671 +0000 UTC m=+100.885388153" watchObservedRunningTime="2025-11-25 15:18:59.833662127 +0000 UTC m=+100.888070609" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.833696 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" podUID="acbe8399-e94e-44d9-b2be-360e1f8231ec" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.839168 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" 
event={"ID":"726696e4-25c2-4664-b022-613823361a4f","Type":"ContainerStarted","Data":"90d9d9ba71e987523d84ee28e18e3782c47a54c00fef1a761eb6f146c7d0055d"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.839875 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.846040 4800 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zk27z container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": dial tcp 10.217.0.23:5443: connect: connection refused" start-of-body= Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.846082 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" podUID="726696e4-25c2-4664-b022-613823361a4f" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": dial tcp 10.217.0.23:5443: connect: connection refused" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.857188 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" event={"ID":"f57363f6-58b9-4a98-893d-9ba2060b31c4","Type":"ContainerStarted","Data":"5e71c5a17394ac8ba993aab0d273d7599388846cfb8abd0dc9d662677c180266"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.858303 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.862379 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-lmlfk" podStartSLOduration=77.862363165 podStartE2EDuration="1m17.862363165s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.860450049 +0000 UTC m=+100.914858531" watchObservedRunningTime="2025-11-25 15:18:59.862363165 +0000 UTC m=+100.916771647" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.863184 4800 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-22v7m container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body= Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.863278 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" podUID="f57363f6-58b9-4a98-893d-9ba2060b31c4" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.867378 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" event={"ID":"fbebb283-2819-40be-8e65-feefc29bc4a1","Type":"ContainerStarted","Data":"8fd3647693e8e77f144e80b576a492af7f94b495d2251ec966c4bbea64334098"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.867493 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.874506 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" event={"ID":"35aecb5c-7326-4273-9cd4-0820f1ee32b4","Type":"ContainerStarted","Data":"c19ebbcbeaac64e6508ab7a510f7bc7954cf961eae8e0028457b07245b9278d5"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.874563 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" event={"ID":"35aecb5c-7326-4273-9cd4-0820f1ee32b4","Type":"ContainerStarted","Data":"cb57f8c5088f8a80a2c9d36c5a9bb8849d4da2a24451ff2f390a83c89dce5314"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.886543 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" event={"ID":"be8b1bce-ac58-4819-a840-8ad7652edc9d","Type":"ContainerStarted","Data":"57b0036e40ae9b6bc27a9f15df531730e60583c93ebaf5eb703e15262ee8708a"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.891436 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-57652" event={"ID":"65ffe8ee-bf0b-4598-a13b-f396179a9ef9","Type":"ContainerStarted","Data":"7b6c07bec35ed7405f6ac7fbd8c738f47df039b41910cf978fe2e877ab0383df"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.902709 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" event={"ID":"a451496e-aec1-4381-916e-d9875d29dbd2","Type":"ContainerStarted","Data":"0aeea25431948e839c886fde22bf6510a0af8a107c1b3e7007026e8bb1b5a40c"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.902759 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" event={"ID":"a451496e-aec1-4381-916e-d9875d29dbd2","Type":"ContainerStarted","Data":"d99eb8299f66522988dfd92656342903f1e8f363802ef765179e82c8c6500148"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.913834 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-t52ch" event={"ID":"f9b17177-8d45-46be-84cd-13a0613df952","Type":"ContainerStarted","Data":"06f3852a5e019826fbf965792bbe00ee245f4a9a24877a4525a6eabe1c19c106"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.915028 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:18:59 crc kubenswrapper[4800]: E1125 15:18:59.918512 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.418498902 +0000 UTC m=+101.472907384 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.935084 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" event={"ID":"9129efc7-1a53-404e-bca8-f26fe4aa7a7b","Type":"ContainerStarted","Data":"2952df0a762e35e8daa8b54ca70a9f727fed1b8b0f20dc80bb4c3669d48368bc"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.937238 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-rfhb2" podStartSLOduration=77.937224689 podStartE2EDuration="1m17.937224689s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.899843489 +0000 UTC m=+100.954251971" watchObservedRunningTime="2025-11-25 15:18:59.937224689 +0000 UTC m=+100.991633171" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.937860 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-57652" podStartSLOduration=6.937851734 podStartE2EDuration="6.937851734s" podCreationTimestamp="2025-11-25 15:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.936425579 +0000 UTC m=+100.990834071" watchObservedRunningTime="2025-11-25 15:18:59.937851734 +0000 UTC m=+100.992260216" Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.944091 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" event={"ID":"9d5157df-dc6c-4f18-81a8-96dc67ec7476","Type":"ContainerStarted","Data":"5f64e396786907951d73e2f246767e50199c939466b9f23076a220ebf230dae7"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.947692 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" event={"ID":"ae0938fa-72d0-4235-8423-6a187f5d854b","Type":"ContainerStarted","Data":"15b478748d124c54b12d790a4231fbd538048e0514537c69b52cc2fb4585df8c"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.961429 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" event={"ID":"cc06d61c-999f-4431-90a4-1fb72e759925","Type":"ContainerStarted","Data":"1662fe415c1073b9aee64f8f1cdd463510e55d04117b17d3e5e801fd9982abc6"} Nov 25 15:18:59 crc kubenswrapper[4800]: I1125 15:18:59.990102 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" event={"ID":"f0affc43-5c6d-423a-85d1-73454b3a197b","Type":"ContainerStarted","Data":"e735684024bf96f77ad4789bb8f6398357195439089a70b5a763c13b70546f24"} Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.014086 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" 
event={"ID":"09fb6a8e-92e7-4f23-8d16-6b8616759965","Type":"ContainerStarted","Data":"cf6d298350d8b9990ad2d069e602219f8b9ed0c2c04cd2be6235c27aeb4aabc2"} Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.015994 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8stkc" podStartSLOduration=77.015980746 podStartE2EDuration="1m17.015980746s" podCreationTimestamp="2025-11-25 15:17:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:18:59.984127311 +0000 UTC m=+101.038535793" watchObservedRunningTime="2025-11-25 15:19:00.015980746 +0000 UTC m=+101.070389238" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.016773 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.016916 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.516895698 +0000 UTC m=+101.571304180 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.017746 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.020450 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.520437215 +0000 UTC m=+101.574845697 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.021082 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" podStartSLOduration=78.02106583 podStartE2EDuration="1m18.02106583s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.016267013 +0000 UTC m=+101.070675485" watchObservedRunningTime="2025-11-25 15:19:00.02106583 +0000 UTC m=+101.075474312" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.038386 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" event={"ID":"66ef1858-faae-4481-aab0-044995d502fc","Type":"ContainerStarted","Data":"e146ad9edc8cfb83c86a656242031e3dff2e0c2ef694ea638d1f7892451c1ed8"} Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.060870 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" event={"ID":"975d90c1-ba29-4bab-9d10-b971ed9a744f","Type":"ContainerStarted","Data":"6351ae29c057ff0f3628eca439ef6b8769e1a852ad08c44c9e5484096f2917c0"} Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.061083 4800 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2nn24 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.061125 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" podUID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.061480 4800 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-r4fbv container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.061538 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" podUID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.089981 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" 
podStartSLOduration=78.089961548 podStartE2EDuration="1m18.089961548s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.050905087 +0000 UTC m=+101.105313569" watchObservedRunningTime="2025-11-25 15:19:00.089961548 +0000 UTC m=+101.144370030" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.090224 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-sx8kw" podStartSLOduration=78.090219894 podStartE2EDuration="1m18.090219894s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.089380633 +0000 UTC m=+101.143789105" watchObservedRunningTime="2025-11-25 15:19:00.090219894 +0000 UTC m=+101.144628376" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.114242 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" podStartSLOduration=78.114221048 podStartE2EDuration="1m18.114221048s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.113088641 +0000 UTC m=+101.167497123" watchObservedRunningTime="2025-11-25 15:19:00.114221048 +0000 UTC m=+101.168629530" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.119367 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.122532 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.62250622 +0000 UTC m=+101.676914772 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.123861 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.128061 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 15:19:00.628046515 +0000 UTC m=+101.682455057 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.133334 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-cr942" podStartSLOduration=78.133314714 podStartE2EDuration="1m18.133314714s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.132056083 +0000 UTC m=+101.186464565" watchObservedRunningTime="2025-11-25 15:19:00.133314714 +0000 UTC m=+101.187723196" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.176455 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" podStartSLOduration=78.176433743 podStartE2EDuration="1m18.176433743s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.160405613 +0000 UTC m=+101.214814095" watchObservedRunningTime="2025-11-25 15:19:00.176433743 +0000 UTC m=+101.230842225" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.178487 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-662mz" podStartSLOduration=78.178478053 podStartE2EDuration="1m18.178478053s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.177237113 +0000 UTC m=+101.231645595" watchObservedRunningTime="2025-11-25 15:19:00.178478053 +0000 UTC m=+101.232886535" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.204565 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-9rpcj" podStartSLOduration=78.204542147 podStartE2EDuration="1m18.204542147s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.198211214 +0000 UTC m=+101.252619686" watchObservedRunningTime="2025-11-25 15:19:00.204542147 +0000 UTC m=+101.258950629" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.224672 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.225120 4800 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.725097259 +0000 UTC m=+101.779505741 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.229419 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" podStartSLOduration=78.229390123 podStartE2EDuration="1m18.229390123s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.225354175 +0000 UTC m=+101.279762657" watchObservedRunningTime="2025-11-25 15:19:00.229390123 +0000 UTC m=+101.283798605" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.269556 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r7cmf" podStartSLOduration=78.269539581 podStartE2EDuration="1m18.269539581s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.267379108 +0000 UTC m=+101.321787610" watchObservedRunningTime="2025-11-25 15:19:00.269539581 +0000 UTC m=+101.323948063" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.271366 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" podStartSLOduration=78.271358555 podStartE2EDuration="1m18.271358555s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.251291626 +0000 UTC m=+101.305700108" watchObservedRunningTime="2025-11-25 15:19:00.271358555 +0000 UTC m=+101.325767027" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.297431 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qdjdp" podStartSLOduration=78.297393919 podStartE2EDuration="1m18.297393919s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.294466118 +0000 UTC m=+101.348874600" watchObservedRunningTime="2025-11-25 15:19:00.297393919 +0000 UTC m=+101.351802401" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.318669 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-t52ch" podStartSLOduration=78.318641216 podStartE2EDuration="1m18.318641216s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-25 15:19:00.315960411 +0000 UTC m=+101.370368893" watchObservedRunningTime="2025-11-25 15:19:00.318641216 +0000 UTC m=+101.373049698" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.321769 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.324337 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.324389 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.325736 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.326172 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.82615985 +0000 UTC m=+101.880568332 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.329032 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.329497 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.330277 4800 patch_prober.go:28] interesting pod/apiserver-76f77b778f-dkmth container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.20:8443/livez\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.330315 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" podUID="cc06d61c-999f-4431-90a4-1fb72e759925" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.20:8443/livez\": dial tcp 10.217.0.20:8443: connect: connection refused" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.335483 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" podStartSLOduration=78.33546715599999 podStartE2EDuration="1m18.335467156s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.333336144 +0000 UTC m=+101.387744636" watchObservedRunningTime="2025-11-25 15:19:00.335467156 +0000 UTC m=+101.389875638" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.349994 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9cw4" podStartSLOduration=78.349969399 podStartE2EDuration="1m18.349969399s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.346399472 +0000 UTC m=+101.400807954" watchObservedRunningTime="2025-11-25 15:19:00.349969399 +0000 UTC m=+101.404377881" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.361918 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-92c5c" podStartSLOduration=77.36190066 podStartE2EDuration="1m17.36190066s" podCreationTimestamp="2025-11-25 15:17:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.36067347 +0000 UTC m=+101.415081952" watchObservedRunningTime="2025-11-25 15:19:00.36190066 +0000 UTC m=+101.416309142" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.384551 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fpfh8" podStartSLOduration=78.384527001 podStartE2EDuration="1m18.384527001s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.38163094 +0000 UTC m=+101.436039422" watchObservedRunningTime="2025-11-25 15:19:00.384527001 +0000 UTC m=+101.438935483" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.427388 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.427902 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.428289 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:00.928234145 +0000 UTC m=+101.982642627 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.430128 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-gjqqh" podStartSLOduration=78.43010453 podStartE2EDuration="1m18.43010453s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:00.428593553 +0000 UTC m=+101.483002035" watchObservedRunningTime="2025-11-25 15:19:00.43010453 +0000 UTC m=+101.484513012" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.449596 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e0ee245-1a7f-4428-bbd9-50de79d2cbd8-metrics-certs\") pod \"network-metrics-daemon-fjqzf\" (UID: \"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8\") " pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.461303 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.461720 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.477660 4800 
patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-j4lh6 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.477742 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" podUID="a3f6160b-061b-4d7b-beac-5873f6c0192c" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.529013 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.529427 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.029413738 +0000 UTC m=+102.083822220 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.629923 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.630166 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.130139391 +0000 UTC m=+102.184547873 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.630468 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.630825 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.130812028 +0000 UTC m=+102.185220500 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.731395 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.731503 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.23148744 +0000 UTC m=+102.285895922 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.732015 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.732336 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.23232737 +0000 UTC m=+102.286735852 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.737440 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fjqzf" Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.833140 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.833462 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.333447462 +0000 UTC m=+102.387855944 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:00 crc kubenswrapper[4800]: I1125 15:19:00.934758 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:00 crc kubenswrapper[4800]: E1125 15:19:00.935771 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.435753343 +0000 UTC m=+102.490161825 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.037167 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.037658 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.537628714 +0000 UTC m=+102.592037196 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.066260 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gn26w" event={"ID":"4ee4c9e0-ba5e-4f6d-92d3-7bc302d34ae5","Type":"ContainerStarted","Data":"c510687c75a4684991d2557097648fb04635db0135c6391374b1409ac8c51f10"} Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.067370 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-gn26w" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.069081 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" event={"ID":"d5c4f943-ed42-40c5-9735-15b2935c7db0","Type":"ContainerStarted","Data":"564ab4484f98be0433e9ed48e71f8659ec472f5f85643512c5c7f42be8fdca9c"} Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.072104 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ftqrc" event={"ID":"f0affc43-5c6d-423a-85d1-73454b3a197b","Type":"ContainerStarted","Data":"e2501add2acecc490c34d8e76e078e39174b8053355f32678ea36308836fd030"} Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.074659 4800 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-rpffk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.074713 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" podUID="acbe8399-e94e-44d9-b2be-360e1f8231ec" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.076536 4800 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5h48t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.078426 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.102530 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-fjqzf"] Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.104632 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-gn26w" podStartSLOduration=8.104608285 podStartE2EDuration="8.104608285s" 
podCreationTimestamp="2025-11-25 15:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:01.099328436 +0000 UTC m=+102.153736918" watchObservedRunningTime="2025-11-25 15:19:01.104608285 +0000 UTC m=+102.159016767" Nov 25 15:19:01 crc kubenswrapper[4800]: W1125 15:19:01.106684 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e0ee245_1a7f_4428_bbd9_50de79d2cbd8.slice/crio-6e7958b082d63b603126cdfb1af2cb02c01f3781476ce31fbb58b2633ffd7976 WatchSource:0}: Error finding container 6e7958b082d63b603126cdfb1af2cb02c01f3781476ce31fbb58b2633ffd7976: Status 404 returned error can't find the container with id 6e7958b082d63b603126cdfb1af2cb02c01f3781476ce31fbb58b2633ffd7976 Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.116884 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22v7m" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.139058 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.142797 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.642773484 +0000 UTC m=+102.697182156 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.222679 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4s25t" podStartSLOduration=79.222096956 podStartE2EDuration="1m19.222096956s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:01.146193008 +0000 UTC m=+102.200601490" watchObservedRunningTime="2025-11-25 15:19:01.222096956 +0000 UTC m=+102.276505428" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.241658 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.241961 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.741913078 +0000 UTC m=+102.796321560 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.242094 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.242842 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.7428285 +0000 UTC m=+102.797236972 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.331680 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:01 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:01 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:01 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.331756 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.340763 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.343752 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.344161 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.844139197 +0000 UTC m=+102.898547669 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.445764 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.446090 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:01.94607737 +0000 UTC m=+103.000485852 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.495887 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.497498 4800 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-nqpgj container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.497509 4800 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-nqpgj container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.497542 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" podUID="4c143db6-2d6b-49bd-987b-a3fbacb8a562" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.497572 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" podUID="4c143db6-2d6b-49bd-987b-a3fbacb8a562" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.497905 4800 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-nqpgj container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.497936 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" podUID="4c143db6-2d6b-49bd-987b-a3fbacb8a562" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.566829 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.567786 4800 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.067758783 +0000 UTC m=+103.122167305 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.567839 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.569479 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.069468725 +0000 UTC m=+103.123877217 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.669332 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.669643 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.169591512 +0000 UTC m=+103.223999994 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.669740 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.670098 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.170082765 +0000 UTC m=+103.224491247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.771195 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.771395 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.271366411 +0000 UTC m=+103.325774893 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.771595 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.771984 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.271971795 +0000 UTC m=+103.326380277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.872443 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.872949 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.372905893 +0000 UTC m=+103.427314375 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.873029 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.873413 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.373397915 +0000 UTC m=+103.427806397 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.973676 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.973936 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.473894312 +0000 UTC m=+103.528302794 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:01 crc kubenswrapper[4800]: I1125 15:19:01.974425 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:01 crc kubenswrapper[4800]: E1125 15:19:01.974799 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.474790954 +0000 UTC m=+103.529199426 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.074961 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.075276 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.57525886 +0000 UTC m=+103.629667342 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.075903 4800 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zk27z container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.075987 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" podUID="726696e4-25c2-4664-b022-613823361a4f" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.077184 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" event={"ID":"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8","Type":"ContainerStarted","Data":"6e7958b082d63b603126cdfb1af2cb02c01f3781476ce31fbb58b2633ffd7976"} Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.078302 4800 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5h48t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.078336 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.079378 4800 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-rpffk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.079410 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" podUID="acbe8399-e94e-44d9-b2be-360e1f8231ec" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.176302 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.178754 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.67873963 +0000 UTC m=+103.733148192 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.278444 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.279025 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.779009402 +0000 UTC m=+103.833417884 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.327827 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:02 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:02 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:02 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.327921 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.379427 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.379801 4800 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.879787956 +0000 UTC m=+103.934196438 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.481052 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.481243 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.981212185 +0000 UTC m=+104.035620667 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.481612 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.482048 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:02.982040655 +0000 UTC m=+104.036449127 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.582994 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.583530 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.083490836 +0000 UTC m=+104.137899318 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.684270 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.684627 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.184609508 +0000 UTC m=+104.239017990 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.785192 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.785391 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.285361152 +0000 UTC m=+104.339769634 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.785677 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.786034 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.286024238 +0000 UTC m=+104.340432720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.886758 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.887021 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.386985386 +0000 UTC m=+104.441393868 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:02 crc kubenswrapper[4800]: I1125 15:19:02.988412 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:02 crc kubenswrapper[4800]: E1125 15:19:02.989017 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.48899401 +0000 UTC m=+104.543402532 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.051969 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zk27z" Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.125707 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.125996 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.625968585 +0000 UTC m=+104.680377067 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.135323 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" event={"ID":"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8","Type":"ContainerStarted","Data":"749ad1f0aa861d4f0c99f43e0c981ff999f70c862ce63dc5179dd7073daf18ce"} Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.135362 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-fjqzf" event={"ID":"3e0ee245-1a7f-4428-bbd9-50de79d2cbd8","Type":"ContainerStarted","Data":"7a4cbbb4068037b97870a51fbc2d0a1420165a33298e8927638353a49dd933e9"} Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.227200 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.227590 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.72757859 +0000 UTC m=+104.781987072 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.324757 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:03 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:03 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:03 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.324822 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.328634 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.329288 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.829269966 +0000 UTC m=+104.883678448 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.430571 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.430971 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:03.930959172 +0000 UTC m=+104.985367654 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.531138 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.531485 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.031455379 +0000 UTC m=+105.085863861 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.632939 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.633628 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.133613487 +0000 UTC m=+105.188021969 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.733674 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.733934 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.233893358 +0000 UTC m=+105.288301840 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.734198 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.734632 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.234615537 +0000 UTC m=+105.289024019 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.835058 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.835272 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.335241847 +0000 UTC m=+105.389650329 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.835479 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.835861 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.335853672 +0000 UTC m=+105.390262234 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.936456 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.936676 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.436631946 +0000 UTC m=+105.491040428 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:03 crc kubenswrapper[4800]: I1125 15:19:03.936807 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:03 crc kubenswrapper[4800]: E1125 15:19:03.937194 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.437186289 +0000 UTC m=+105.491594771 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.037545 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.037825 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.537765488 +0000 UTC m=+105.592173970 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.138873 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.139324 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.63930301 +0000 UTC m=+105.693711492 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.166765 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" event={"ID":"0ca6158f-3cfc-484b-946a-311538680135","Type":"ContainerStarted","Data":"1489e1c3c49b77706d2b5d59aa03c023c1b1ca4686420a13715cc0f695a66ced"} Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.195177 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-fjqzf" podStartSLOduration=82.195159871 podStartE2EDuration="1m22.195159871s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:04.189427361 +0000 UTC m=+105.243835843" watchObservedRunningTime="2025-11-25 15:19:04.195159871 +0000 UTC m=+105.249568353" Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.239298 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.239537 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.739521411 +0000 UTC m=+105.793929893 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.324184 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:04 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:04 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:04 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.324282 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.340222 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.340804 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.840776426 +0000 UTC m=+105.895185078 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.441131 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.441372 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.941326305 +0000 UTC m=+105.995734787 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.441751 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.442112 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:04.942096443 +0000 UTC m=+105.996504925 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.502347 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.542686 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.542927 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.042883608 +0000 UTC m=+106.097292100 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.543084 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.543551 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.043540753 +0000 UTC m=+106.097949435 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.644791 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.645025 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.144988394 +0000 UTC m=+106.199396876 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.645286 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.645668 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.14565809 +0000 UTC m=+106.200066572 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.746360 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.746633 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.246589018 +0000 UTC m=+106.300997500 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.746708 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.747149 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.247122751 +0000 UTC m=+106.301531413 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.847874 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.848019 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.347994437 +0000 UTC m=+106.402402909 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.848093 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.848459 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.348451328 +0000 UTC m=+106.402859810 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.949354 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.949515 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.449489288 +0000 UTC m=+106.503897770 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.949760 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:04 crc kubenswrapper[4800]: E1125 15:19:04.950226 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.450212357 +0000 UTC m=+106.504620839 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.976525 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.977719 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.981457 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.986067 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 25 15:19:04 crc kubenswrapper[4800]: I1125 15:19:04.991018 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.050743 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.050880 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.550835626 +0000 UTC m=+106.605244118 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.051048 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d86732b-befd-4a28-855b-3989f9821bdc-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.051114 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9d86732b-befd-4a28-855b-3989f9821bdc-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.051189 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.051606 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.551594115 +0000 UTC m=+106.606002597 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.152196 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.152368 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d86732b-befd-4a28-855b-3989f9821bdc-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.152433 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.65239264 +0000 UTC m=+106.706801122 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.152534 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9d86732b-befd-4a28-855b-3989f9821bdc-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.152683 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9d86732b-befd-4a28-855b-3989f9821bdc-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.152739 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.153097 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.653076236 +0000 UTC m=+106.707484918 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.175743 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d86732b-befd-4a28-855b-3989f9821bdc-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.178244 4800 generic.go:334] "Generic (PLEG): container finished" podID="1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" containerID="eb8ce379ff209a0acdbb0a8617d0786ea508d29d36a27ee7ac9c8a1e4baac375" exitCode=0 Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.178316 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" event={"ID":"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40","Type":"ContainerDied","Data":"eb8ce379ff209a0acdbb0a8617d0786ea508d29d36a27ee7ac9c8a1e4baac375"} Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.209464 4800 patch_prober.go:28] interesting pod/downloads-7954f5f757-q5x2z container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.209542 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q5x2z" podUID="6f015c93-38f5-4f11-9f72-6d99259e4058" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.211155 4800 patch_prober.go:28] interesting pod/downloads-7954f5f757-q5x2z container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.211188 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q5x2z" podUID="6f015c93-38f5-4f11-9f72-6d99259e4058" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.229394 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4mb2k"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.230498 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.232410 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.254964 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.255699 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4mb2k"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.255763 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knkrs\" (UniqueName: \"kubernetes.io/projected/16615745-a673-44e3-8cd7-980d59c421ad-kube-api-access-knkrs\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.255871 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.755801308 +0000 UTC m=+106.810209790 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.255954 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-catalog-content\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.256052 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.256131 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-utilities\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.256618 4800 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.756606937 +0000 UTC m=+106.811015419 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.301508 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.335205 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:05 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:05 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:05 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.335277 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.357610 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.357901 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-utilities\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.358602 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knkrs\" (UniqueName: \"kubernetes.io/projected/16615745-a673-44e3-8cd7-980d59c421ad-kube-api-access-knkrs\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.358748 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-catalog-content\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.359560 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-catalog-content\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.359808 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.85978577 +0000 UTC m=+106.914194252 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.367222 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-utilities\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.381957 4800 patch_prober.go:28] interesting pod/apiserver-76f77b778f-dkmth container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]log ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]etcd ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/max-in-flight-filter ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 15:19:05 crc kubenswrapper[4800]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 25 15:19:05 crc kubenswrapper[4800]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/openshift.io-startinformers ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 15:19:05 crc kubenswrapper[4800]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 15:19:05 crc kubenswrapper[4800]: livez check failed Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.382044 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" podUID="cc06d61c-999f-4431-90a4-1fb72e759925" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.405806 4800 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-knkrs\" (UniqueName: \"kubernetes.io/projected/16615745-a673-44e3-8cd7-980d59c421ad-kube-api-access-knkrs\") pod \"certified-operators-4mb2k\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.449420 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6td7n"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.456562 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.464032 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.465004 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt2v2\" (UniqueName: \"kubernetes.io/projected/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-kube-api-access-rt2v2\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.465094 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-utilities\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.465148 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.465177 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-catalog-content\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.465487 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:05.965475513 +0000 UTC m=+107.019883995 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.468407 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6td7n"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.485448 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.493976 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j4lh6" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.556098 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.565812 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.566045 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-catalog-content\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.566145 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rt2v2\" (UniqueName: \"kubernetes.io/projected/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-kube-api-access-rt2v2\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.566169 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-utilities\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.566914 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-catalog-content\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.567026 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 15:19:06.067005466 +0000 UTC m=+107.121413948 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.569295 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-utilities\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.596311 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt2v2\" (UniqueName: \"kubernetes.io/projected/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-kube-api-access-rt2v2\") pod \"community-operators-6td7n\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.635492 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7bnkb"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.636498 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.646075 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7bnkb"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.667248 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tq67\" (UniqueName: \"kubernetes.io/projected/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-kube-api-access-8tq67\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.667747 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.667791 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-catalog-content\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.667861 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-utilities\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " 
pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.668524 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.168493497 +0000 UTC m=+107.222902149 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.693162 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.754349 4800 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.770014 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.770477 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.270430609 +0000 UTC m=+107.324839101 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.771737 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tq67\" (UniqueName: \"kubernetes.io/projected/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-kube-api-access-8tq67\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.771810 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.771912 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-catalog-content\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.771998 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-utilities\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.774132 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-catalog-content\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.774201 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.274180171 +0000 UTC m=+107.328588853 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.774222 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-utilities\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.791733 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.795820 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tq67\" (UniqueName: \"kubernetes.io/projected/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-kube-api-access-8tq67\") pod \"certified-operators-7bnkb\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.831888 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8clxk"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.836302 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.850766 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.851078 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.852655 4800 patch_prober.go:28] interesting pod/console-f9d7485db-sx8kw container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.852707 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-sx8kw" podUID="81f88e63-467c-4356-bb2b-b5aa9d93f512" containerName="console" probeResult="failure" output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.854544 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8clxk"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.873789 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.874068 4800 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.374043592 +0000 UTC m=+107.428452074 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.874151 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9gkv\" (UniqueName: \"kubernetes.io/projected/00b2c35b-aea8-40f1-af86-ab2ca005e90c-kube-api-access-w9gkv\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.874300 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-catalog-content\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.874325 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.874355 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-utilities\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.876194 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.376176634 +0000 UTC m=+107.430585326 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.898233 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4mb2k"] Nov 25 15:19:05 crc kubenswrapper[4800]: W1125 15:19:05.904603 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16615745_a673_44e3_8cd7_980d59c421ad.slice/crio-dfb0638a9365c296fd7392a3e68a62b9417e7c20e0ddcd9d1ae38f07fe56a3f7 WatchSource:0}: Error finding container dfb0638a9365c296fd7392a3e68a62b9417e7c20e0ddcd9d1ae38f07fe56a3f7: Status 404 returned error can't find the container with id dfb0638a9365c296fd7392a3e68a62b9417e7c20e0ddcd9d1ae38f07fe56a3f7 Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.959757 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.963406 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.975362 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.975650 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.475610666 +0000 UTC m=+107.530019158 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.975821 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9gkv\" (UniqueName: \"kubernetes.io/projected/00b2c35b-aea8-40f1-af86-ab2ca005e90c-kube-api-access-w9gkv\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.976398 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-catalog-content\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.976460 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.976512 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-utilities\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.976939 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-catalog-content\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:05 crc kubenswrapper[4800]: E1125 15:19:05.977253 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.477240235 +0000 UTC m=+107.531648717 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:05 crc kubenswrapper[4800]: I1125 15:19:05.977238 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-utilities\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.002244 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9gkv\" (UniqueName: \"kubernetes.io/projected/00b2c35b-aea8-40f1-af86-ab2ca005e90c-kube-api-access-w9gkv\") pod \"community-operators-8clxk\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.052406 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.077516 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.078707 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.578688765 +0000 UTC m=+107.633097247 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.136065 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6td7n"] Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.167119 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.182212 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:19:06 crc kubenswrapper[4800]: W1125 15:19:06.192249 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod932f12b1_d6ce_4e42_b70f_6cd51c1082a1.slice/crio-e3b99a6de1fddd3c43b7916988ec9c2a856ed51e6d643395cf2f02d2607dc6f7 WatchSource:0}: Error finding container e3b99a6de1fddd3c43b7916988ec9c2a856ed51e6d643395cf2f02d2607dc6f7: Status 404 returned error can't find the container with id e3b99a6de1fddd3c43b7916988ec9c2a856ed51e6d643395cf2f02d2607dc6f7 Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.195936 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.199635 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.69961565 +0000 UTC m=+107.754024132 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.257414 4800 generic.go:334] "Generic (PLEG): container finished" podID="16615745-a673-44e3-8cd7-980d59c421ad" containerID="1afdd79ba2f2e6809b379a06212dc5d1a0f61977d4c4e8c4360c41c9313e314a" exitCode=0 Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.257541 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mb2k" event={"ID":"16615745-a673-44e3-8cd7-980d59c421ad","Type":"ContainerDied","Data":"1afdd79ba2f2e6809b379a06212dc5d1a0f61977d4c4e8c4360c41c9313e314a"} Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.257578 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mb2k" event={"ID":"16615745-a673-44e3-8cd7-980d59c421ad","Type":"ContainerStarted","Data":"dfb0638a9365c296fd7392a3e68a62b9417e7c20e0ddcd9d1ae38f07fe56a3f7"} Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.260039 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9d86732b-befd-4a28-855b-3989f9821bdc","Type":"ContainerStarted","Data":"ab1b1026cc29fa243a6a59e9489c77b6ab6be8a0094e321d6bb92d1fd7fc1df8"} Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.273910 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.296699 4800 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.296807 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.796786696 +0000 UTC m=+107.851195178 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.297093 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.297702 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.797694668 +0000 UTC m=+107.852103150 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.303242 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" event={"ID":"0ca6158f-3cfc-484b-946a-311538680135","Type":"ContainerStarted","Data":"9f93b63e6155ffb71c99bee4ee852b012f6dccb754521c54d68e578eae0b8e88"} Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.303311 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" event={"ID":"0ca6158f-3cfc-484b-946a-311538680135","Type":"ContainerStarted","Data":"daf7a2db1611ab7fa66af46cf83724dddb48829f487ca0763653a1b212870b64"} Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.313021 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6td7n" event={"ID":"932f12b1-d6ce-4e42-b70f-6cd51c1082a1","Type":"ContainerStarted","Data":"e3b99a6de1fddd3c43b7916988ec9c2a856ed51e6d643395cf2f02d2607dc6f7"} Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.322662 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.324753 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7bnkb"] Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.332864 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:06 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:06 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:06 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.333996 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:06 crc kubenswrapper[4800]: W1125 15:19:06.374114 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5242e24a_a670_4da1_ad3c_4b13d7b84b6d.slice/crio-011f0a36cf33578a1c6e8fbd3fbef256535135cd907416883a4b8fe97933aaec WatchSource:0}: Error finding container 011f0a36cf33578a1c6e8fbd3fbef256535135cd907416883a4b8fe97933aaec: Status 404 returned error can't find the container with id 011f0a36cf33578a1c6e8fbd3fbef256535135cd907416883a4b8fe97933aaec Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.398043 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 
25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.399295 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:06.899253651 +0000 UTC m=+107.953662133 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.437071 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rpffk" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.482018 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8clxk"] Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.500031 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.500513 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:07.000487026 +0000 UTC m=+108.054895508 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.601075 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.601291 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:07.101220649 +0000 UTC m=+108.155629131 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.601533 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.601950 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 15:19:07.101939626 +0000 UTC m=+108.156348108 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7txz7" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: W1125 15:19:06.622231 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00b2c35b_aea8_40f1_af86_ab2ca005e90c.slice/crio-dbe4c339fd2d7d510f30785295cefd698cf3f0ec53c3c7ccdc3d67ca5cabb22e WatchSource:0}: Error finding container dbe4c339fd2d7d510f30785295cefd698cf3f0ec53c3c7ccdc3d67ca5cabb22e: Status 404 returned error can't find the container with id dbe4c339fd2d7d510f30785295cefd698cf3f0ec53c3c7ccdc3d67ca5cabb22e Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.641445 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.702425 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:06 crc kubenswrapper[4800]: E1125 15:19:06.702802 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 15:19:07.202782332 +0000 UTC m=+108.257190814 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.741367 4800 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T15:19:05.754382389Z","Handler":null,"Name":""} Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.744432 4800 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.744475 4800 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.803511 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-config-volume\") pod \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.803561 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h8p7\" (UniqueName: \"kubernetes.io/projected/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-kube-api-access-8h8p7\") pod \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.803736 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-secret-volume\") pod \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\" (UID: \"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40\") " Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.803908 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.804621 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-config-volume" (OuterVolumeSpecName: "config-volume") pod "1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" (UID: "1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.810974 4800 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.811025 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.811318 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" (UID: "1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.813702 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-kube-api-access-8h8p7" (OuterVolumeSpecName: "kube-api-access-8h8p7") pod "1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" (UID: "1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40"). InnerVolumeSpecName "kube-api-access-8h8p7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.837377 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7txz7\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.904787 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.921968 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.922333 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h8p7\" (UniqueName: \"kubernetes.io/projected/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-kube-api-access-8h8p7\") on node \"crc\" DevicePath \"\"" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.922464 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 15:19:06 crc kubenswrapper[4800]: I1125 15:19:06.926399 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: 
"8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.088169 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.241305 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xgrss"] Nov 25 15:19:07 crc kubenswrapper[4800]: E1125 15:19:07.241958 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" containerName="collect-profiles" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.242085 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" containerName="collect-profiles" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.242216 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" containerName="collect-profiles" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.243677 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.246303 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.265827 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgrss"] Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.320386 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" event={"ID":"0ca6158f-3cfc-484b-946a-311538680135","Type":"ContainerStarted","Data":"15ba587ab3b8c49782734b6ee620a518f6e2b829835ecfbf4bf6e66d641ae4f7"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.324999 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" event={"ID":"1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40","Type":"ContainerDied","Data":"13c7b321fdf8414f96b1a12802ba024ce4a78db6affcf7b31d08aeaa4aacb319"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.325024 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13c7b321fdf8414f96b1a12802ba024ce4a78db6affcf7b31d08aeaa4aacb319" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.325105 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.327116 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:07 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:07 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:07 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.327188 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.334658 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-utilities\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.334706 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shs9d\" (UniqueName: \"kubernetes.io/projected/40b1358b-2b78-4d92-8e03-baf11a6aecde-kube-api-access-shs9d\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.334772 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-catalog-content\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.334799 4800 generic.go:334] "Generic (PLEG): container finished" podID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerID="38c421282f3740f1eb94bb9ca6b085e1a5a7323b8b0dc35f5c7313adec8a201b" exitCode=0 Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.334946 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6td7n" event={"ID":"932f12b1-d6ce-4e42-b70f-6cd51c1082a1","Type":"ContainerDied","Data":"38c421282f3740f1eb94bb9ca6b085e1a5a7323b8b0dc35f5c7313adec8a201b"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.337958 4800 generic.go:334] "Generic (PLEG): container finished" podID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerID="252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8" exitCode=0 Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.338059 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8clxk" event={"ID":"00b2c35b-aea8-40f1-af86-ab2ca005e90c","Type":"ContainerDied","Data":"252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.338095 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8clxk" 
event={"ID":"00b2c35b-aea8-40f1-af86-ab2ca005e90c","Type":"ContainerStarted","Data":"dbe4c339fd2d7d510f30785295cefd698cf3f0ec53c3c7ccdc3d67ca5cabb22e"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.343613 4800 generic.go:334] "Generic (PLEG): container finished" podID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerID="a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df" exitCode=0 Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.343709 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7bnkb" event={"ID":"5242e24a-a670-4da1-ad3c-4b13d7b84b6d","Type":"ContainerDied","Data":"a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.343745 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7bnkb" event={"ID":"5242e24a-a670-4da1-ad3c-4b13d7b84b6d","Type":"ContainerStarted","Data":"011f0a36cf33578a1c6e8fbd3fbef256535135cd907416883a4b8fe97933aaec"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.353136 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9d86732b-befd-4a28-855b-3989f9821bdc","Type":"ContainerStarted","Data":"a922a0b1f82d58908c267bb1999f91191c278c3ff096a9dd138e3872a7a766ec"} Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.356270 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7txz7"] Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.358099 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-xmp86" podStartSLOduration=14.358082889 podStartE2EDuration="14.358082889s" podCreationTimestamp="2025-11-25 15:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:07.350943764 +0000 UTC m=+108.405352246" watchObservedRunningTime="2025-11-25 15:19:07.358082889 +0000 UTC m=+108.412491371" Nov 25 15:19:07 crc kubenswrapper[4800]: W1125 15:19:07.364626 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf691fcb_4403_45a8_80e0_58a2c50f5481.slice/crio-eff66c7ebbb71bc22ced9b54d377bab1a948490fea144cf402feb69a3eebae3b WatchSource:0}: Error finding container eff66c7ebbb71bc22ced9b54d377bab1a948490fea144cf402feb69a3eebae3b: Status 404 returned error can't find the container with id eff66c7ebbb71bc22ced9b54d377bab1a948490fea144cf402feb69a3eebae3b Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.436856 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-utilities\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.436920 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shs9d\" (UniqueName: \"kubernetes.io/projected/40b1358b-2b78-4d92-8e03-baf11a6aecde-kube-api-access-shs9d\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.437014 4800 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-catalog-content\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.439624 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-utilities\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.440718 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-catalog-content\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.440914 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.4409013760000002 podStartE2EDuration="3.440901376s" podCreationTimestamp="2025-11-25 15:19:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:07.437358349 +0000 UTC m=+108.491766831" watchObservedRunningTime="2025-11-25 15:19:07.440901376 +0000 UTC m=+108.495309858" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.458816 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shs9d\" (UniqueName: \"kubernetes.io/projected/40b1358b-2b78-4d92-8e03-baf11a6aecde-kube-api-access-shs9d\") pod \"redhat-marketplace-xgrss\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.563906 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.623549 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fx96n"] Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.625050 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.637715 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx96n"] Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.744361 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-catalog-content\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.744452 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpdxj\" (UniqueName: \"kubernetes.io/projected/79762218-0d90-43f8-a512-a9b95dd3486e-kube-api-access-xpdxj\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.744689 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-utilities\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.801204 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.842226 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgrss"] Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.846296 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-utilities\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.846389 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-catalog-content\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.846445 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpdxj\" (UniqueName: \"kubernetes.io/projected/79762218-0d90-43f8-a512-a9b95dd3486e-kube-api-access-xpdxj\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.847342 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-utilities\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.847682 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-catalog-content\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: I1125 15:19:07.873795 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpdxj\" (UniqueName: \"kubernetes.io/projected/79762218-0d90-43f8-a512-a9b95dd3486e-kube-api-access-xpdxj\") pod \"redhat-marketplace-fx96n\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:07 crc kubenswrapper[4800]: W1125 15:19:07.899520 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40b1358b_2b78_4d92_8e03_baf11a6aecde.slice/crio-4e9d27ea9e9afee4ed198c07cb6b1cbc58f0975e46e98fb120a015472ad78f3b WatchSource:0}: Error finding container 4e9d27ea9e9afee4ed198c07cb6b1cbc58f0975e46e98fb120a015472ad78f3b: Status 404 returned error can't find the container with id 4e9d27ea9e9afee4ed198c07cb6b1cbc58f0975e46e98fb120a015472ad78f3b Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.002551 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.249218 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx96n"] Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.325356 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:08 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:08 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:08 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.325446 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.360901 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" event={"ID":"cf691fcb-4403-45a8-80e0-58a2c50f5481","Type":"ContainerStarted","Data":"a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099"} Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.360973 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.360988 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" event={"ID":"cf691fcb-4403-45a8-80e0-58a2c50f5481","Type":"ContainerStarted","Data":"eff66c7ebbb71bc22ced9b54d377bab1a948490fea144cf402feb69a3eebae3b"} Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.362517 4800 generic.go:334] "Generic (PLEG): container finished" podID="9d86732b-befd-4a28-855b-3989f9821bdc" 
containerID="a922a0b1f82d58908c267bb1999f91191c278c3ff096a9dd138e3872a7a766ec" exitCode=0 Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.362594 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9d86732b-befd-4a28-855b-3989f9821bdc","Type":"ContainerDied","Data":"a922a0b1f82d58908c267bb1999f91191c278c3ff096a9dd138e3872a7a766ec"} Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.378403 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx96n" event={"ID":"79762218-0d90-43f8-a512-a9b95dd3486e","Type":"ContainerStarted","Data":"6c816a4860a5eaa46939a94776757be4ae9f7c78f41a14fa5f0695ceb1093c84"} Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.386596 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgrss" event={"ID":"40b1358b-2b78-4d92-8e03-baf11a6aecde","Type":"ContainerStarted","Data":"4e9d27ea9e9afee4ed198c07cb6b1cbc58f0975e46e98fb120a015472ad78f3b"} Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.406454 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" podStartSLOduration=86.406437916 podStartE2EDuration="1m26.406437916s" podCreationTimestamp="2025-11-25 15:17:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:08.405444242 +0000 UTC m=+109.459852724" watchObservedRunningTime="2025-11-25 15:19:08.406437916 +0000 UTC m=+109.460846398" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.621512 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hpkx7"] Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.622670 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.625182 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.642067 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hpkx7"] Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.662207 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-utilities\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.662290 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fm5b\" (UniqueName: \"kubernetes.io/projected/1b030df0-0b5c-4854-bdaf-6b61067bed50-kube-api-access-6fm5b\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.662453 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-catalog-content\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.764127 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-utilities\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.764210 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fm5b\" (UniqueName: \"kubernetes.io/projected/1b030df0-0b5c-4854-bdaf-6b61067bed50-kube-api-access-6fm5b\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.764327 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-catalog-content\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.765808 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-utilities\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.765831 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-catalog-content\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " 
pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.796149 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fm5b\" (UniqueName: \"kubernetes.io/projected/1b030df0-0b5c-4854-bdaf-6b61067bed50-kube-api-access-6fm5b\") pod \"redhat-operators-hpkx7\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:08 crc kubenswrapper[4800]: I1125 15:19:08.944596 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.006023 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.025068 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8b5b9"] Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.027434 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.036420 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8b5b9"] Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.069017 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfpp5\" (UniqueName: \"kubernetes.io/projected/ac983522-6eeb-4141-a7f4-99e9f6f3b480-kube-api-access-tfpp5\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.069103 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-catalog-content\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.069164 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-utilities\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.170468 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfpp5\" (UniqueName: \"kubernetes.io/projected/ac983522-6eeb-4141-a7f4-99e9f6f3b480-kube-api-access-tfpp5\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.170861 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-catalog-content\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.170917 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-utilities\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.171601 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-utilities\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.172176 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-catalog-content\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.213954 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfpp5\" (UniqueName: \"kubernetes.io/projected/ac983522-6eeb-4141-a7f4-99e9f6f3b480-kube-api-access-tfpp5\") pod \"redhat-operators-8b5b9\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.332262 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:09 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:09 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:09 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.332358 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.363480 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.397621 4800 generic.go:334] "Generic (PLEG): container finished" podID="79762218-0d90-43f8-a512-a9b95dd3486e" containerID="6af55e537855361244aa4eb3b047458d89ed1cffb2c5a2888fcf3e930705c7a5" exitCode=0 Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.397686 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx96n" event={"ID":"79762218-0d90-43f8-a512-a9b95dd3486e","Type":"ContainerDied","Data":"6af55e537855361244aa4eb3b047458d89ed1cffb2c5a2888fcf3e930705c7a5"} Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.400377 4800 generic.go:334] "Generic (PLEG): container finished" podID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerID="dcfd6ff47404939b3470729964dc05745ada3810d350c1f1ff1c97de7f2310bf" exitCode=0 Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.401040 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgrss" event={"ID":"40b1358b-2b78-4d92-8e03-baf11a6aecde","Type":"ContainerDied","Data":"dcfd6ff47404939b3470729964dc05745ada3810d350c1f1ff1c97de7f2310bf"} Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.452722 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hpkx7"] Nov 25 15:19:09 crc kubenswrapper[4800]: W1125 15:19:09.469572 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b030df0_0b5c_4854_bdaf_6b61067bed50.slice/crio-38ad52fcdbd15179f3010d24b720b69d5631f44e0edfaa4c1ce1b8002d892531 WatchSource:0}: Error finding container 38ad52fcdbd15179f3010d24b720b69d5631f44e0edfaa4c1ce1b8002d892531: Status 404 returned error can't find the container with id 38ad52fcdbd15179f3010d24b720b69d5631f44e0edfaa4c1ce1b8002d892531 Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.552389 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.553159 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.557650 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.561578 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.563730 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.578526 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.578664 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.680078 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.680589 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.680211 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.711830 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.737114 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8b5b9"] Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.750095 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.894158 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d86732b-befd-4a28-855b-3989f9821bdc-kube-api-access\") pod \"9d86732b-befd-4a28-855b-3989f9821bdc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.894313 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9d86732b-befd-4a28-855b-3989f9821bdc-kubelet-dir\") pod \"9d86732b-befd-4a28-855b-3989f9821bdc\" (UID: \"9d86732b-befd-4a28-855b-3989f9821bdc\") " Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.894720 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9d86732b-befd-4a28-855b-3989f9821bdc-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9d86732b-befd-4a28-855b-3989f9821bdc" (UID: "9d86732b-befd-4a28-855b-3989f9821bdc"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.901368 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.903385 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d86732b-befd-4a28-855b-3989f9821bdc-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9d86732b-befd-4a28-855b-3989f9821bdc" (UID: "9d86732b-befd-4a28-855b-3989f9821bdc"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.995825 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d86732b-befd-4a28-855b-3989f9821bdc-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 15:19:09 crc kubenswrapper[4800]: I1125 15:19:09.995881 4800 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9d86732b-befd-4a28-855b-3989f9821bdc-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.325667 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:10 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:10 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:10 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.326264 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.335737 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.340378 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-dkmth" Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.460626 4800 generic.go:334] "Generic (PLEG): container finished" podID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerID="3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3" exitCode=0 Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.460723 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8b5b9" event={"ID":"ac983522-6eeb-4141-a7f4-99e9f6f3b480","Type":"ContainerDied","Data":"3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3"} Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.460757 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8b5b9" event={"ID":"ac983522-6eeb-4141-a7f4-99e9f6f3b480","Type":"ContainerStarted","Data":"ae526db6b0c2a79d646b55dc56b93903937123446f1e5d6bdbfcad093f58916d"} Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.475571 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.490261 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9d86732b-befd-4a28-855b-3989f9821bdc","Type":"ContainerDied","Data":"ab1b1026cc29fa243a6a59e9489c77b6ab6be8a0094e321d6bb92d1fd7fc1df8"} Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.490307 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab1b1026cc29fa243a6a59e9489c77b6ab6be8a0094e321d6bb92d1fd7fc1df8" Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.490422 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.528562 4800 generic.go:334] "Generic (PLEG): container finished" podID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerID="ef4f01e91b56da431f5e5133c2f2e8897641a3997a41291fdf6af85bb402a81c" exitCode=0 Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.528801 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hpkx7" event={"ID":"1b030df0-0b5c-4854-bdaf-6b61067bed50","Type":"ContainerDied","Data":"ef4f01e91b56da431f5e5133c2f2e8897641a3997a41291fdf6af85bb402a81c"} Nov 25 15:19:10 crc kubenswrapper[4800]: I1125 15:19:10.528929 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hpkx7" event={"ID":"1b030df0-0b5c-4854-bdaf-6b61067bed50","Type":"ContainerStarted","Data":"38ad52fcdbd15179f3010d24b720b69d5631f44e0edfaa4c1ce1b8002d892531"} Nov 25 15:19:10 crc kubenswrapper[4800]: W1125 15:19:10.533609 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pode8dd7b45_b6ad_49d6_a6b0_94ab56357f7e.slice/crio-2e59142591606762be4117acade75385cf17819b0413d2d185aada47d994b4e2 WatchSource:0}: Error finding container 2e59142591606762be4117acade75385cf17819b0413d2d185aada47d994b4e2: Status 404 returned error can't find the container with id 2e59142591606762be4117acade75385cf17819b0413d2d185aada47d994b4e2 Nov 25 15:19:11 crc kubenswrapper[4800]: I1125 15:19:11.326920 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:11 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:11 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:11 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:11 crc kubenswrapper[4800]: I1125 15:19:11.327785 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:11 crc kubenswrapper[4800]: I1125 15:19:11.534418 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-gn26w" Nov 25 15:19:11 crc kubenswrapper[4800]: I1125 15:19:11.542609 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e","Type":"ContainerStarted","Data":"2e59142591606762be4117acade75385cf17819b0413d2d185aada47d994b4e2"} Nov 25 15:19:12 crc kubenswrapper[4800]: I1125 15:19:12.324262 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:12 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:12 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:12 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:12 crc kubenswrapper[4800]: I1125 15:19:12.324655 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" 
containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:12 crc kubenswrapper[4800]: I1125 15:19:12.558641 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e","Type":"ContainerStarted","Data":"80298fccaf5cfed6c2ef9c38519cbde6e32cee392d473d6c9a3e2523c1017de5"} Nov 25 15:19:12 crc kubenswrapper[4800]: I1125 15:19:12.583000 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.58272689 podStartE2EDuration="3.58272689s" podCreationTimestamp="2025-11-25 15:19:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:19:12.571716452 +0000 UTC m=+113.626124934" watchObservedRunningTime="2025-11-25 15:19:12.58272689 +0000 UTC m=+113.637135372" Nov 25 15:19:13 crc kubenswrapper[4800]: I1125 15:19:13.323391 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:13 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:13 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:13 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:13 crc kubenswrapper[4800]: I1125 15:19:13.323484 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:13 crc kubenswrapper[4800]: I1125 15:19:13.620645 4800 generic.go:334] "Generic (PLEG): container finished" podID="e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e" containerID="80298fccaf5cfed6c2ef9c38519cbde6e32cee392d473d6c9a3e2523c1017de5" exitCode=0 Nov 25 15:19:13 crc kubenswrapper[4800]: I1125 15:19:13.620694 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e","Type":"ContainerDied","Data":"80298fccaf5cfed6c2ef9c38519cbde6e32cee392d473d6c9a3e2523c1017de5"} Nov 25 15:19:14 crc kubenswrapper[4800]: I1125 15:19:14.325875 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:14 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:14 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:14 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:14 crc kubenswrapper[4800]: I1125 15:19:14.325960 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.210124 4800 patch_prober.go:28] interesting pod/downloads-7954f5f757-q5x2z container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= 
Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.210573 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q5x2z" podUID="6f015c93-38f5-4f11-9f72-6d99259e4058" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.210161 4800 patch_prober.go:28] interesting pod/downloads-7954f5f757-q5x2z container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.210751 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q5x2z" podUID="6f015c93-38f5-4f11-9f72-6d99259e4058" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.324360 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:15 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:15 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:15 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.324453 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.850679 4800 patch_prober.go:28] interesting pod/console-f9d7485db-sx8kw container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Nov 25 15:19:15 crc kubenswrapper[4800]: I1125 15:19:15.850761 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-sx8kw" podUID="81f88e63-467c-4356-bb2b-b5aa9d93f512" containerName="console" probeResult="failure" output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" Nov 25 15:19:16 crc kubenswrapper[4800]: I1125 15:19:16.324824 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:16 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:16 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:16 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:16 crc kubenswrapper[4800]: I1125 15:19:16.324922 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:17 crc kubenswrapper[4800]: I1125 15:19:17.324477 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch 
container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:17 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:17 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:17 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:17 crc kubenswrapper[4800]: I1125 15:19:17.324952 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:18 crc kubenswrapper[4800]: I1125 15:19:18.324142 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:18 crc kubenswrapper[4800]: [-]has-synced failed: reason withheld Nov 25 15:19:18 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:18 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:18 crc kubenswrapper[4800]: I1125 15:19:18.324210 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:19 crc kubenswrapper[4800]: I1125 15:19:19.327245 4800 patch_prober.go:28] interesting pod/router-default-5444994796-t52ch container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 15:19:19 crc kubenswrapper[4800]: [+]has-synced ok Nov 25 15:19:19 crc kubenswrapper[4800]: [+]process-running ok Nov 25 15:19:19 crc kubenswrapper[4800]: healthz check failed Nov 25 15:19:19 crc kubenswrapper[4800]: I1125 15:19:19.327345 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t52ch" podUID="f9b17177-8d45-46be-84cd-13a0613df952" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 15:19:20 crc kubenswrapper[4800]: I1125 15:19:20.324107 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:19:20 crc kubenswrapper[4800]: I1125 15:19:20.328277 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-t52ch" Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.260867 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.328495 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kube-api-access\") pod \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.329138 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kubelet-dir\") pod \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\" (UID: \"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e\") " Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.329721 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e" (UID: "e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.336244 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e" (UID: "e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.430827 4800 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.430905 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.684821 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e","Type":"ContainerDied","Data":"2e59142591606762be4117acade75385cf17819b0413d2d185aada47d994b4e2"} Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.684921 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e59142591606762be4117acade75385cf17819b0413d2d185aada47d994b4e2" Nov 25 15:19:21 crc kubenswrapper[4800]: I1125 15:19:21.684954 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 15:19:25 crc kubenswrapper[4800]: I1125 15:19:25.231433 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-q5x2z" Nov 25 15:19:25 crc kubenswrapper[4800]: I1125 15:19:25.852929 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:19:25 crc kubenswrapper[4800]: I1125 15:19:25.858724 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:19:27 crc kubenswrapper[4800]: I1125 15:19:27.094909 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:19:36 crc kubenswrapper[4800]: I1125 15:19:36.158147 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w2lpp" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.474987 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 15:19:44 crc kubenswrapper[4800]: E1125 15:19:44.476618 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e" containerName="pruner" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.476717 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e" containerName="pruner" Nov 25 15:19:44 crc kubenswrapper[4800]: E1125 15:19:44.476805 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d86732b-befd-4a28-855b-3989f9821bdc" containerName="pruner" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.476908 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d86732b-befd-4a28-855b-3989f9821bdc" containerName="pruner" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.477121 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d86732b-befd-4a28-855b-3989f9821bdc" containerName="pruner" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.477212 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8dd7b45-b6ad-49d6-a6b0-94ab56357f7e" containerName="pruner" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.477767 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.484221 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.484519 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.489377 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.565096 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbb52e79-5a46-452b-9124-dd6449a1de3a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.565191 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dbb52e79-5a46-452b-9124-dd6449a1de3a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.666578 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbb52e79-5a46-452b-9124-dd6449a1de3a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.666673 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dbb52e79-5a46-452b-9124-dd6449a1de3a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.666762 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dbb52e79-5a46-452b-9124-dd6449a1de3a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.683072 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbb52e79-5a46-452b-9124-dd6449a1de3a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:44 crc kubenswrapper[4800]: I1125 15:19:44.814444 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:19:46 crc kubenswrapper[4800]: E1125 15:19:46.049960 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 15:19:46 crc kubenswrapper[4800]: E1125 15:19:46.050191 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8tq67,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-7bnkb_openshift-marketplace(5242e24a-a670-4da1-ad3c-4b13d7b84b6d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:19:46 crc kubenswrapper[4800]: E1125 15:19:46.051357 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-7bnkb" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" Nov 25 15:19:46 crc kubenswrapper[4800]: E1125 15:19:46.136791 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 15:19:46 crc kubenswrapper[4800]: E1125 15:19:46.136971 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-knkrs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-4mb2k_openshift-marketplace(16615745-a673-44e3-8cd7-980d59c421ad): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:19:46 crc kubenswrapper[4800]: E1125 15:19:46.139079 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-4mb2k" podUID="16615745-a673-44e3-8cd7-980d59c421ad" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.628462 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.628576 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.628625 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.628666 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.631950 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.632434 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.633221 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.642461 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.647385 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.654829 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.654907 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.743628 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.803599 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:19:48 crc kubenswrapper[4800]: I1125 15:19:48.969663 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:19:49 crc kubenswrapper[4800]: I1125 15:19:49.112510 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 15:19:49 crc kubenswrapper[4800]: E1125 15:19:49.751751 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-7bnkb" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" Nov 25 15:19:49 crc kubenswrapper[4800]: E1125 15:19:49.751905 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-4mb2k" podUID="16615745-a673-44e3-8cd7-980d59c421ad" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.471922 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.472980 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.488478 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.657379 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.657426 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-var-lock\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.657468 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2af2b622-17be-49af-8adc-4ba183fb2e99-kube-api-access\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.759247 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.759323 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-var-lock\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.759358 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/2af2b622-17be-49af-8adc-4ba183fb2e99-kube-api-access\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.759398 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.759506 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-var-lock\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.781831 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2af2b622-17be-49af-8adc-4ba183fb2e99-kube-api-access\") pod \"installer-9-crc\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:19:50 crc kubenswrapper[4800]: I1125 15:19:50.792497 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:20:06 crc kubenswrapper[4800]: E1125 15:20:06.078126 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 15:20:06 crc kubenswrapper[4800]: E1125 15:20:06.078764 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rt2v2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-6td7n_openshift-marketplace(932f12b1-d6ce-4e42-b70f-6cd51c1082a1): 
ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:20:06 crc kubenswrapper[4800]: E1125 15:20:06.080302 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-6td7n" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" Nov 25 15:20:09 crc kubenswrapper[4800]: E1125 15:20:09.739458 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-6td7n" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" Nov 25 15:20:12 crc kubenswrapper[4800]: I1125 15:20:12.639659 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:20:12 crc kubenswrapper[4800]: I1125 15:20:12.640004 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:20:13 crc kubenswrapper[4800]: E1125 15:20:13.193137 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 15:20:13 crc kubenswrapper[4800]: E1125 15:20:13.193311 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tfpp5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-8b5b9_openshift-marketplace(ac983522-6eeb-4141-a7f4-99e9f6f3b480): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:20:13 crc kubenswrapper[4800]: E1125 15:20:13.194585 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-8b5b9" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" Nov 25 15:20:13 crc kubenswrapper[4800]: I1125 15:20:13.762822 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 15:20:13 crc kubenswrapper[4800]: I1125 15:20:13.881817 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 15:20:13 crc kubenswrapper[4800]: W1125 15:20:13.885999 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-4a90b9b8c235786fbe0f316dc3db6fe1ba016bd16a93333a17a3bd5e85f15048 WatchSource:0}: Error finding container 4a90b9b8c235786fbe0f316dc3db6fe1ba016bd16a93333a17a3bd5e85f15048: Status 404 returned error can't find the container with id 4a90b9b8c235786fbe0f316dc3db6fe1ba016bd16a93333a17a3bd5e85f15048 Nov 25 15:20:13 crc kubenswrapper[4800]: W1125 15:20:13.900507 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poddbb52e79_5a46_452b_9124_dd6449a1de3a.slice/crio-dd635e930a5ff369373110318779e2d71ab59b12b011781d847b07d7e093f0bc WatchSource:0}: Error finding container dd635e930a5ff369373110318779e2d71ab59b12b011781d847b07d7e093f0bc: Status 404 returned error can't find the container with id dd635e930a5ff369373110318779e2d71ab59b12b011781d847b07d7e093f0bc Nov 25 15:20:13 crc kubenswrapper[4800]: I1125 15:20:13.988002 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"dbb52e79-5a46-452b-9124-dd6449a1de3a","Type":"ContainerStarted","Data":"dd635e930a5ff369373110318779e2d71ab59b12b011781d847b07d7e093f0bc"} Nov 25 15:20:13 crc kubenswrapper[4800]: I1125 15:20:13.989276 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2af2b622-17be-49af-8adc-4ba183fb2e99","Type":"ContainerStarted","Data":"8efb654b701b91fe3dd201ff8f4b461776c95c43115185660ee9af1426b59afa"} Nov 25 15:20:13 crc kubenswrapper[4800]: I1125 15:20:13.990563 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4a90b9b8c235786fbe0f316dc3db6fe1ba016bd16a93333a17a3bd5e85f15048"} Nov 25 15:20:13 crc kubenswrapper[4800]: I1125 15:20:13.991865 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ca7646e1f4a4397fe824d7b84a3312f16c1ff5151032ce54be233c4ee2cfd416"} Nov 25 15:20:13 crc kubenswrapper[4800]: I1125 15:20:13.993375 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"671d221ff3cc5353b17937cd5d97bde3ce371076a8b612aade8001d1583df7ee"} Nov 25 15:20:13 crc kubenswrapper[4800]: E1125 15:20:13.994049 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-8b5b9" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" Nov 25 15:20:16 crc kubenswrapper[4800]: I1125 15:20:16.007616 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"903b0617dbba775bad98f62cfd856d6cb8c75bbdb571f3157bcf7568031de0c3"} Nov 25 15:20:16 crc kubenswrapper[4800]: E1125 15:20:16.336879 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 15:20:16 crc kubenswrapper[4800]: E1125 15:20:16.337094 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-shs9d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-xgrss_openshift-marketplace(40b1358b-2b78-4d92-8e03-baf11a6aecde): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:20:16 crc kubenswrapper[4800]: E1125 15:20:16.338390 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-xgrss" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" Nov 25 15:20:17 crc kubenswrapper[4800]: I1125 15:20:17.014661 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"dbb52e79-5a46-452b-9124-dd6449a1de3a","Type":"ContainerStarted","Data":"1ae4116623e38f0b4b36a65fb3fb182ea9c1b56001dc9ab1841184b0dac369ee"} Nov 25 15:20:17 crc kubenswrapper[4800]: I1125 15:20:17.017013 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2af2b622-17be-49af-8adc-4ba183fb2e99","Type":"ContainerStarted","Data":"fa7374c64c2c62cdff3dd72421c96b15c3ec173a62da970abab3fc2dc34907f0"} Nov 25 15:20:17 crc kubenswrapper[4800]: I1125 15:20:17.019117 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"3a48a0fefc12cef7bc4f62da7c311569aecfdcf461557ab0b1bdcdb155936101"} Nov 25 15:20:17 crc kubenswrapper[4800]: I1125 15:20:17.020909 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f071818ec2062ac461eb5feb9d097a9688c25a036729b066f6d029a59a10c314"} Nov 25 15:20:17 crc kubenswrapper[4800]: E1125 15:20:17.023793 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-xgrss" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" Nov 25 15:20:17 crc kubenswrapper[4800]: I1125 15:20:17.033516 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=33.033494828 podStartE2EDuration="33.033494828s" podCreationTimestamp="2025-11-25 15:19:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:20:17.031329263 +0000 UTC m=+178.085737735" watchObservedRunningTime="2025-11-25 15:20:17.033494828 +0000 UTC m=+178.087903320" Nov 25 15:20:17 crc kubenswrapper[4800]: I1125 15:20:17.113032 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=27.113018327 podStartE2EDuration="27.113018327s" podCreationTimestamp="2025-11-25 15:19:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:20:17.110097253 +0000 UTC m=+178.164505735" watchObservedRunningTime="2025-11-25 15:20:17.113018327 +0000 UTC m=+178.167426809" Nov 25 15:20:18 crc kubenswrapper[4800]: I1125 15:20:18.032911 4800 generic.go:334] "Generic (PLEG): container finished" podID="dbb52e79-5a46-452b-9124-dd6449a1de3a" containerID="1ae4116623e38f0b4b36a65fb3fb182ea9c1b56001dc9ab1841184b0dac369ee" exitCode=0 Nov 25 15:20:18 crc kubenswrapper[4800]: I1125 15:20:18.033330 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"dbb52e79-5a46-452b-9124-dd6449a1de3a","Type":"ContainerDied","Data":"1ae4116623e38f0b4b36a65fb3fb182ea9c1b56001dc9ab1841184b0dac369ee"} Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.414744 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.414985 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xpdxj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-fx96n_openshift-marketplace(79762218-0d90-43f8-a512-a9b95dd3486e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.416293 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-fx96n" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.446499 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.446654 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w9gkv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-8clxk_openshift-marketplace(00b2c35b-aea8-40f1-af86-ab2ca005e90c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.447871 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-8clxk" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.458256 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.458410 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6fm5b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-hpkx7_openshift-marketplace(1b030df0-0b5c-4854-bdaf-6b61067bed50): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 15:20:18 crc kubenswrapper[4800]: E1125 15:20:18.459577 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-hpkx7" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" Nov 25 15:20:18 crc kubenswrapper[4800]: I1125 15:20:18.804139 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:20:19 crc kubenswrapper[4800]: E1125 15:20:19.041738 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-fx96n" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" Nov 25 15:20:19 crc kubenswrapper[4800]: E1125 15:20:19.041797 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-hpkx7" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" Nov 25 15:20:19 crc kubenswrapper[4800]: E1125 15:20:19.041819 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-8clxk" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" Nov 25 15:20:19 crc kubenswrapper[4800]: I1125 15:20:19.287425 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:20:19 crc kubenswrapper[4800]: I1125 15:20:19.478074 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbb52e79-5a46-452b-9124-dd6449a1de3a-kube-api-access\") pod \"dbb52e79-5a46-452b-9124-dd6449a1de3a\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " Nov 25 15:20:19 crc kubenswrapper[4800]: I1125 15:20:19.478151 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dbb52e79-5a46-452b-9124-dd6449a1de3a-kubelet-dir\") pod \"dbb52e79-5a46-452b-9124-dd6449a1de3a\" (UID: \"dbb52e79-5a46-452b-9124-dd6449a1de3a\") " Nov 25 15:20:19 crc kubenswrapper[4800]: I1125 15:20:19.478289 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dbb52e79-5a46-452b-9124-dd6449a1de3a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "dbb52e79-5a46-452b-9124-dd6449a1de3a" (UID: "dbb52e79-5a46-452b-9124-dd6449a1de3a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:20:19 crc kubenswrapper[4800]: I1125 15:20:19.478477 4800 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dbb52e79-5a46-452b-9124-dd6449a1de3a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:20:19 crc kubenswrapper[4800]: I1125 15:20:19.485996 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbb52e79-5a46-452b-9124-dd6449a1de3a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "dbb52e79-5a46-452b-9124-dd6449a1de3a" (UID: "dbb52e79-5a46-452b-9124-dd6449a1de3a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:20:19 crc kubenswrapper[4800]: I1125 15:20:19.579775 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbb52e79-5a46-452b-9124-dd6449a1de3a-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 15:20:20 crc kubenswrapper[4800]: I1125 15:20:20.050684 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"dbb52e79-5a46-452b-9124-dd6449a1de3a","Type":"ContainerDied","Data":"dd635e930a5ff369373110318779e2d71ab59b12b011781d847b07d7e093f0bc"} Nov 25 15:20:20 crc kubenswrapper[4800]: I1125 15:20:20.050728 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd635e930a5ff369373110318779e2d71ab59b12b011781d847b07d7e093f0bc" Nov 25 15:20:20 crc kubenswrapper[4800]: I1125 15:20:20.050796 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 15:20:34 crc kubenswrapper[4800]: I1125 15:20:34.154017 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6td7n" event={"ID":"932f12b1-d6ce-4e42-b70f-6cd51c1082a1","Type":"ContainerStarted","Data":"7a6196573c5b29c508175fee6f2e9a12bc9126a778a71a9f1d1b33666f3604d4"} Nov 25 15:20:34 crc kubenswrapper[4800]: I1125 15:20:34.156944 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mb2k" event={"ID":"16615745-a673-44e3-8cd7-980d59c421ad","Type":"ContainerStarted","Data":"5b09b23bbbe226e33cc0bab9313406175dd8aa46bb79553c35896ba89cb78c21"} Nov 25 15:20:34 crc kubenswrapper[4800]: I1125 15:20:34.159683 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7bnkb" event={"ID":"5242e24a-a670-4da1-ad3c-4b13d7b84b6d","Type":"ContainerStarted","Data":"e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303"} Nov 25 15:20:35 crc kubenswrapper[4800]: I1125 15:20:35.171237 4800 generic.go:334] "Generic (PLEG): container finished" podID="16615745-a673-44e3-8cd7-980d59c421ad" containerID="5b09b23bbbe226e33cc0bab9313406175dd8aa46bb79553c35896ba89cb78c21" exitCode=0 Nov 25 15:20:35 crc kubenswrapper[4800]: I1125 15:20:35.171342 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mb2k" event={"ID":"16615745-a673-44e3-8cd7-980d59c421ad","Type":"ContainerDied","Data":"5b09b23bbbe226e33cc0bab9313406175dd8aa46bb79553c35896ba89cb78c21"} Nov 25 15:20:35 crc kubenswrapper[4800]: I1125 15:20:35.176238 4800 generic.go:334] "Generic (PLEG): container finished" podID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerID="e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303" exitCode=0 Nov 25 15:20:35 crc kubenswrapper[4800]: I1125 15:20:35.176342 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7bnkb" event={"ID":"5242e24a-a670-4da1-ad3c-4b13d7b84b6d","Type":"ContainerDied","Data":"e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303"} Nov 25 15:20:35 crc kubenswrapper[4800]: I1125 15:20:35.178916 4800 generic.go:334] "Generic (PLEG): container finished" podID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerID="7a6196573c5b29c508175fee6f2e9a12bc9126a778a71a9f1d1b33666f3604d4" exitCode=0 Nov 25 15:20:35 crc kubenswrapper[4800]: I1125 15:20:35.178975 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6td7n" event={"ID":"932f12b1-d6ce-4e42-b70f-6cd51c1082a1","Type":"ContainerDied","Data":"7a6196573c5b29c508175fee6f2e9a12bc9126a778a71a9f1d1b33666f3604d4"} Nov 25 15:20:42 crc kubenswrapper[4800]: I1125 15:20:42.640657 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:20:42 crc kubenswrapper[4800]: I1125 15:20:42.641679 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:20:48 crc 
kubenswrapper[4800]: I1125 15:20:48.813359 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.056672 4800 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.057897 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9" gracePeriod=15 Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.057951 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23" gracePeriod=15 Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.058102 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53" gracePeriod=15 Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.058112 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0" gracePeriod=15 Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.058959 4800 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059217 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059232 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059242 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059249 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059264 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059272 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059284 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc 
kubenswrapper[4800]: I1125 15:20:54.059292 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059301 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059308 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059321 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059328 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059343 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbb52e79-5a46-452b-9124-dd6449a1de3a" containerName="pruner" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059351 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbb52e79-5a46-452b-9124-dd6449a1de3a" containerName="pruner" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.058154 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba" gracePeriod=15 Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.059364 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059522 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059821 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbb52e79-5a46-452b-9124-dd6449a1de3a" containerName="pruner" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059872 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059894 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059911 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059928 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059946 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059959 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.059974 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 15:20:54 crc kubenswrapper[4800]: E1125 15:20:54.060203 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.060220 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.062398 4800 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.063295 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.073976 4800 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.105146 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.105704 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.105966 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.106151 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.106417 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.106602 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.106758 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.107005 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.124665 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.208439 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.208765 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.208883 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.208948 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.208926 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.208540 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209102 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209107 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209196 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209229 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209248 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209264 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209348 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209383 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209393 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.209509 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:54 crc kubenswrapper[4800]: I1125 15:20:54.421694 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:20:55 crc kubenswrapper[4800]: I1125 15:20:55.316887 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 15:20:55 crc kubenswrapper[4800]: I1125 15:20:55.318711 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 15:20:55 crc kubenswrapper[4800]: I1125 15:20:55.319511 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0" exitCode=2 Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.341018 4800 generic.go:334] "Generic (PLEG): container finished" podID="2af2b622-17be-49af-8adc-4ba183fb2e99" containerID="fa7374c64c2c62cdff3dd72421c96b15c3ec173a62da970abab3fc2dc34907f0" exitCode=0 Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.341157 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2af2b622-17be-49af-8adc-4ba183fb2e99","Type":"ContainerDied","Data":"fa7374c64c2c62cdff3dd72421c96b15c3ec173a62da970abab3fc2dc34907f0"} Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.342681 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.344027 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.344812 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.346375 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.347510 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23" exitCode=0 Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.347545 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba" exitCode=0 Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.347559 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53" exitCode=0 Nov 25 15:20:58 crc kubenswrapper[4800]: I1125 15:20:58.347586 4800 scope.go:117] "RemoveContainer" containerID="25c45967475a96fd3336f7171d13469e3f022f88b23cf1d1737263dbd8f394ff" Nov 25 15:20:59 crc kubenswrapper[4800]: I1125 15:20:59.362295 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 15:20:59 crc kubenswrapper[4800]: I1125 15:20:59.363774 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9" exitCode=0 Nov 25 15:20:59 crc kubenswrapper[4800]: I1125 15:20:59.787483 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:20:59 crc kubenswrapper[4800]: I1125 15:20:59.788097 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.216483 4800 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.217416 4800 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.217908 4800 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.218457 4800 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.219607 4800 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:00 crc kubenswrapper[4800]: I1125 15:21:00.219688 4800 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.220385 4800 
controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="200ms" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.421207 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="400ms" Nov 25 15:21:00 crc kubenswrapper[4800]: E1125 15:21:00.822555 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="800ms" Nov 25 15:21:01 crc kubenswrapper[4800]: E1125 15:21:01.624253 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="1.6s" Nov 25 15:21:01 crc kubenswrapper[4800]: E1125 15:21:01.719692 4800 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-fx96n.187b491b8caaa8dd openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-fx96n,UID:79762218-0d90-43f8-a512-a9b95dd3486e,APIVersion:v1,ResourceVersion:28570,FieldPath:spec.initContainers{extract-content},},Reason:Pulled,Message:Successfully pulled image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\" in 28.603s (28.603s including waiting). Image size: 1118510475 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 15:21:01.718620381 +0000 UTC m=+222.773028903,LastTimestamp:2025-11-25 15:21:01.718620381 +0000 UTC m=+222.773028903,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.818659 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.819711 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.820476 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.856453 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-var-lock\") pod \"2af2b622-17be-49af-8adc-4ba183fb2e99\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.856618 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-var-lock" (OuterVolumeSpecName: "var-lock") pod "2af2b622-17be-49af-8adc-4ba183fb2e99" (UID: "2af2b622-17be-49af-8adc-4ba183fb2e99"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.856690 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2af2b622-17be-49af-8adc-4ba183fb2e99-kube-api-access\") pod \"2af2b622-17be-49af-8adc-4ba183fb2e99\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.856810 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-kubelet-dir\") pod \"2af2b622-17be-49af-8adc-4ba183fb2e99\" (UID: \"2af2b622-17be-49af-8adc-4ba183fb2e99\") " Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.857037 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2af2b622-17be-49af-8adc-4ba183fb2e99" (UID: "2af2b622-17be-49af-8adc-4ba183fb2e99"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.857577 4800 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.857601 4800 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2af2b622-17be-49af-8adc-4ba183fb2e99-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.867473 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2af2b622-17be-49af-8adc-4ba183fb2e99-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2af2b622-17be-49af-8adc-4ba183fb2e99" (UID: "2af2b622-17be-49af-8adc-4ba183fb2e99"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:21:01 crc kubenswrapper[4800]: I1125 15:21:01.958770 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2af2b622-17be-49af-8adc-4ba183fb2e99-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 15:21:02 crc kubenswrapper[4800]: I1125 15:21:02.398492 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2af2b622-17be-49af-8adc-4ba183fb2e99","Type":"ContainerDied","Data":"8efb654b701b91fe3dd201ff8f4b461776c95c43115185660ee9af1426b59afa"} Nov 25 15:21:02 crc kubenswrapper[4800]: I1125 15:21:02.399100 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8efb654b701b91fe3dd201ff8f4b461776c95c43115185660ee9af1426b59afa" Nov 25 15:21:02 crc kubenswrapper[4800]: I1125 15:21:02.398584 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 15:21:02 crc kubenswrapper[4800]: I1125 15:21:02.422952 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:02 crc kubenswrapper[4800]: I1125 15:21:02.424111 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:03 crc kubenswrapper[4800]: E1125 15:21:03.224895 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="3.2s" Nov 25 15:21:06 crc kubenswrapper[4800]: E1125 15:21:06.426489 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="6.4s" Nov 25 15:21:08 crc kubenswrapper[4800]: E1125 15:21:08.442425 4800 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-fx96n.187b491b8caaa8dd openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-fx96n,UID:79762218-0d90-43f8-a512-a9b95dd3486e,APIVersion:v1,ResourceVersion:28570,FieldPath:spec.initContainers{extract-content},},Reason:Pulled,Message:Successfully pulled image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\" in 28.603s (28.603s including waiting). 
Image size: 1118510475 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 15:21:01.718620381 +0000 UTC m=+222.773028903,LastTimestamp:2025-11-25 15:21:01.718620381 +0000 UTC m=+222.773028903,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.451423 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.455297 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.455346 4800 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799" exitCode=1 Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.455386 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799"} Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.456082 4800 scope.go:117] "RemoveContainer" containerID="17f8cb5c3947a9d8b193e08f0f41fe858b6c7bbcb91e07ba104cd2200ce7a799" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.456672 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.457133 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.457789 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.878928 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.880592 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.881558 4800 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.882218 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.882794 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:08 crc kubenswrapper[4800]: I1125 15:21:08.883451 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065082 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065205 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065230 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065315 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065393 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065449 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065773 4800 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065798 4800 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.065810 4800 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.467038 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.468177 4800 scope.go:117] "RemoveContainer" containerID="0cdb963c357d81a5a40dd8d3091a826bf030bc17a46f43f2a8e4bd44651eff23" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.468380 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.493450 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.494334 4800 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.494863 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.497162 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.666009 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.794838 4800 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.795772 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.796321 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.797336 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 15:21:09 crc kubenswrapper[4800]: I1125 15:21:09.797678 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:10 crc kubenswrapper[4800]: I1125 15:21:10.275775 4800 scope.go:117] "RemoveContainer" containerID="75a211446d3064a47e2edb68f423025e8da8e45b82b9d673c28d7043dcfd0fba" Nov 25 15:21:10 crc kubenswrapper[4800]: I1125 15:21:10.331085 4800 scope.go:117] "RemoveContainer" containerID="e760d39629e4ce33d9ca4c78fb1b608a1ce25afa6fcfcff8ce92d451f9233c53" Nov 25 15:21:10 crc kubenswrapper[4800]: W1125 15:21:10.349345 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-149aa5faaacbf56fce81abbd504c8d26d4b3c7987f695ca719d6ea53cb7e6d46 WatchSource:0}: Error finding container 149aa5faaacbf56fce81abbd504c8d26d4b3c7987f695ca719d6ea53cb7e6d46: Status 404 returned error can't find the container with id 149aa5faaacbf56fce81abbd504c8d26d4b3c7987f695ca719d6ea53cb7e6d46 Nov 25 15:21:10 crc kubenswrapper[4800]: I1125 15:21:10.479036 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 15:21:10 crc kubenswrapper[4800]: I1125 15:21:10.482364 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"149aa5faaacbf56fce81abbd504c8d26d4b3c7987f695ca719d6ea53cb7e6d46"} Nov 25 15:21:10 crc kubenswrapper[4800]: I1125 15:21:10.486524 4800 scope.go:117] "RemoveContainer" containerID="ff1b9a9291f81a07dd8a1f509eaa5fa9e4837c175464c96f009dbf8076d405e0" Nov 25 15:21:10 crc kubenswrapper[4800]: I1125 15:21:10.524439 4800 scope.go:117] "RemoveContainer" containerID="f5e12fbd9478977bf0b6af492499b0de03d51da7ab899cd6c50004c1adb1b5e9" Nov 25 15:21:10 crc kubenswrapper[4800]: I1125 15:21:10.572811 4800 scope.go:117] "RemoveContainer" containerID="174c70ca2723c38e2217fb2359b4282490e90b0a499620bca07fd7419b422475" Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.489988 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6td7n" event={"ID":"932f12b1-d6ce-4e42-b70f-6cd51c1082a1","Type":"ContainerStarted","Data":"fe67d5fcee66a87c3ab1b2cdedbf1d6aeddd0fcdccede724fd53207caee118df"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.491560 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.491959 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.492295 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.492630 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.493443 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"74393ed9369462499643a10c6a12d5e2ad4aa32b6841bd461c9bdb77e3bfd5e6"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.495279 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx96n" event={"ID":"79762218-0d90-43f8-a512-a9b95dd3486e","Type":"ContainerStarted","Data":"b5b16d9802a93f4f36366080d09ede24053e2c077df74f9dd7dce9a4c692637f"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.497144 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgrss" event={"ID":"40b1358b-2b78-4d92-8e03-baf11a6aecde","Type":"ContainerStarted","Data":"200db8e86b2e222c8d22631fd57c0b9ff654d16ab0c516b8462b253c6fc8c688"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.499651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7bnkb" event={"ID":"5242e24a-a670-4da1-ad3c-4b13d7b84b6d","Type":"ContainerStarted","Data":"24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.501985 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hpkx7" event={"ID":"1b030df0-0b5c-4854-bdaf-6b61067bed50","Type":"ContainerStarted","Data":"e5f20085dab30490c37f3c9abb2e2eb628176e82622436b360defef73c46b500"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.504818 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8b5b9" event={"ID":"ac983522-6eeb-4141-a7f4-99e9f6f3b480","Type":"ContainerStarted","Data":"e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.508190 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.508320 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bd7c5671b0d6973227804d7c511a2b61e17edb06fdcf1861b88e2bbd9bc9a2d0"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.510565 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mb2k" event={"ID":"16615745-a673-44e3-8cd7-980d59c421ad","Type":"ContainerStarted","Data":"ef0604839604a02c74f9c7e61e87ec37a9d2b967d655b06cca1f4c15236b37c7"} Nov 25 15:21:11 crc kubenswrapper[4800]: I1125 15:21:11.512302 4800 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8clxk" event={"ID":"00b2c35b-aea8-40f1-af86-ab2ca005e90c","Type":"ContainerStarted","Data":"a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3"} Nov 25 15:21:12 crc kubenswrapper[4800]: E1125 15:21:12.297651 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:21:12Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:21:12Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:21:12Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T15:21:12Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[],\\\"sizeBytes\\\":1603154853},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:164f3a157984a21fa040de454eb6ea8de62d91acb99a1586c15923b94c94b3e3\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:525d9b0a71d2e4fc1356afcde5f0cf6552f0cb01e4da37a912e291c4e4129a3c\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1205450133},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1aa29d6288f6ef5d880bfd9dfb98d1aef8f1c4992d339d94efd16f877058d6ea\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3abf1f213c6c985ee3b015b741433a2ee7f41f04dc156447ec226c3a24518ac8\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1188148634},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c1d9b3b6b8ec84a6a7b0c70bced2baac180321427fee36bb3a9037b16be31340\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c49729203f53d48429c84820ca88cca87ae6ccc936850b4fc77c1f14dbfd40af\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1118510475},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],
\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: E1125 15:21:12.299034 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: E1125 15:21:12.299855 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: E1125 15:21:12.300159 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: E1125 15:21:12.300609 4800 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: E1125 15:21:12.300819 4800 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.520361 4800 generic.go:334] "Generic (PLEG): container finished" podID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerID="200db8e86b2e222c8d22631fd57c0b9ff654d16ab0c516b8462b253c6fc8c688" exitCode=0 Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.520439 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgrss" event={"ID":"40b1358b-2b78-4d92-8e03-baf11a6aecde","Type":"ContainerDied","Data":"200db8e86b2e222c8d22631fd57c0b9ff654d16ab0c516b8462b253c6fc8c688"} Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.523415 4800 generic.go:334] "Generic (PLEG): container finished" podID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerID="e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915" exitCode=0 Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.523466 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8b5b9" event={"ID":"ac983522-6eeb-4141-a7f4-99e9f6f3b480","Type":"ContainerDied","Data":"e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915"} Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.525266 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.525750 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.526104 4800 status_manager.go:851] 
"Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.526513 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.526925 4800 generic.go:334] "Generic (PLEG): container finished" podID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerID="a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3" exitCode=0 Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.526994 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8clxk" event={"ID":"00b2c35b-aea8-40f1-af86-ab2ca005e90c","Type":"ContainerDied","Data":"a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3"} Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.526923 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.529562 4800 generic.go:334] "Generic (PLEG): container finished" podID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerID="e5f20085dab30490c37f3c9abb2e2eb628176e82622436b360defef73c46b500" exitCode=0 Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.529635 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hpkx7" event={"ID":"1b030df0-0b5c-4854-bdaf-6b61067bed50","Type":"ContainerDied","Data":"e5f20085dab30490c37f3c9abb2e2eb628176e82622436b360defef73c46b500"} Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.532365 4800 generic.go:334] "Generic (PLEG): container finished" podID="79762218-0d90-43f8-a512-a9b95dd3486e" containerID="b5b16d9802a93f4f36366080d09ede24053e2c077df74f9dd7dce9a4c692637f" exitCode=0 Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.532430 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx96n" event={"ID":"79762218-0d90-43f8-a512-a9b95dd3486e","Type":"ContainerDied","Data":"b5b16d9802a93f4f36366080d09ede24053e2c077df74f9dd7dce9a4c692637f"} Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.533416 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.533634 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.533881 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.534193 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.534674 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.535452 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.535812 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.536574 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.537197 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.537576 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.538057 4800 status_manager.go:851] "Failed to get status for pod" 
podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.640223 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.640306 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.640378 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.641223 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:21:12 crc kubenswrapper[4800]: I1125 15:21:12.641406 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da" gracePeriod=600 Nov 25 15:21:12 crc kubenswrapper[4800]: E1125 15:21:12.827571 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="7s" Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.392178 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.552579 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da" exitCode=0 Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.553505 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da"} Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.554641 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: 
connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.555085 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.555586 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.556022 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.556426 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.556748 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.557056 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.557317 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.558155 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.558488 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.558876 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.559162 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.559499 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.559968 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.560256 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.561050 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.561604 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.562311 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:13 crc kubenswrapper[4800]: I1125 15:21:13.563093 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.556999 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4mb2k"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.558351 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4mb2k"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.573763 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8b5b9" event={"ID":"ac983522-6eeb-4141-a7f4-99e9f6f3b480","Type":"ContainerStarted","Data":"b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559"}
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.576685 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"238219946c20c6136882342e4c1c6dd100485f8911e03584bba4787972e400d4"}
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.578585 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.579200 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.579526 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.579895 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.580444 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.580738 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.580987 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.581151 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.582171 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.582321 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.582461 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.582608 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.795224 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6td7n"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.795291 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6td7n"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.964212 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6td7n"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.965263 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.965306 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7bnkb"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.965422 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7bnkb"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.965678 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.966164 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.966411 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.966687 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.966963 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.967160 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.967395 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.967557 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4mb2k"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.967652 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.967993 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.969165 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.969799 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.970146 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.970392 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.970633 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.970953 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.971205 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.971396 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.971760 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.972294 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.972546 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.972834 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.973313 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:15 crc kubenswrapper[4800]: I1125 15:21:15.973595 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.013560 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7bnkb"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.014218 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.014674 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.015010 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.015346 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.015663 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.015954 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.016270 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.016578 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.016931 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.017239 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.017560 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.017882 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.584596 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.585179 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.585466 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.585794 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.586139 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.586561 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.587287 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.588335 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.588866 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.589238 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.589704 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.590289 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.625194 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7bnkb"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.625804 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.626222 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.628222 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.628600 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.628781 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.628977 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.629163 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.629321 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.629788 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6td7n"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.629512 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.630024 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.630196 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.630360 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.630653 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.630833 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.631050 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.631207 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.631389 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.631553 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.631712 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.631922 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.632283 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.632441 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.632581 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:16 crc kubenswrapper[4800]: I1125 15:21:16.632725 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.590122 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8clxk" event={"ID":"00b2c35b-aea8-40f1-af86-ab2ca005e90c","Type":"ContainerStarted","Data":"e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112"}
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.786461 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.787479 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.788045 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.788259 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.788642 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.789173 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.789354 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.792017 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.792325 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.792504 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.792672 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.792830 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.793030 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.811762 4800 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.811800 4800 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125"
Nov 25 15:21:17 crc kubenswrapper[4800]: E1125 15:21:17.812325 4800 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:21:17 crc kubenswrapper[4800]: I1125 15:21:17.812883 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 15:21:18 crc kubenswrapper[4800]: E1125 15:21:18.444236 4800 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.145:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-fx96n.187b491b8caaa8dd openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-fx96n,UID:79762218-0d90-43f8-a512-a9b95dd3486e,APIVersion:v1,ResourceVersion:28570,FieldPath:spec.initContainers{extract-content},},Reason:Pulled,Message:Successfully pulled image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\" in 28.603s (28.603s including waiting). Image size: 1118510475 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 15:21:01.718620381 +0000 UTC m=+222.773028903,LastTimestamp:2025-11-25 15:21:01.718620381 +0000 UTC m=+222.773028903,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.594302 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.594572 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.594795 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.594968 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.595112 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.595258 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.595398 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.595538 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused"
Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.595675 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.595826 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.596000 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:18 crc kubenswrapper[4800]: I1125 15:21:18.596144 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.365287 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.365698 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.667071 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.672435 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.673229 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.673768 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.674347 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.674829 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.675305 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.675733 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.676249 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.676669 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.677206 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.677531 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.677868 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.678211 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" 
pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.791833 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.792503 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.793010 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.793571 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.793762 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.793966 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.794117 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.794267 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.794423 4800 status_manager.go:851] 
"Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.797492 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.800068 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.800705 4800 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: I1125 15:21:19.801230 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:19 crc kubenswrapper[4800]: E1125 15:21:19.828562 4800 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.145:6443: connect: connection refused" interval="7s" Nov 25 15:21:20 crc kubenswrapper[4800]: I1125 15:21:20.432415 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8b5b9" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="registry-server" probeResult="failure" output=< Nov 25 15:21:20 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 15:21:20 crc kubenswrapper[4800]: > Nov 25 15:21:21 crc kubenswrapper[4800]: W1125 15:21:21.097380 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-44539ad858fdec86cc7983ae62d20a199a43834822c293e9d37e86e8ed421eb4 WatchSource:0}: Error finding container 44539ad858fdec86cc7983ae62d20a199a43834822c293e9d37e86e8ed421eb4: Status 404 returned error can't find the container with id 44539ad858fdec86cc7983ae62d20a199a43834822c293e9d37e86e8ed421eb4 Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.617605 4800 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="471afeb0a7987ff282357b0052eb220d2d9d4fc10cf12f9396ac14c03dd66602" exitCode=0 Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.617748 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"471afeb0a7987ff282357b0052eb220d2d9d4fc10cf12f9396ac14c03dd66602"} Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.617992 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"44539ad858fdec86cc7983ae62d20a199a43834822c293e9d37e86e8ed421eb4"} Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.618238 4800 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.618250 4800 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125" Nov 25 15:21:21 crc kubenswrapper[4800]: E1125 15:21:21.618935 4800 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.619003 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.619733 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.620074 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.620542 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.620993 4800 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.621261 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.621320 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx96n" event={"ID":"79762218-0d90-43f8-a512-a9b95dd3486e","Type":"ContainerStarted","Data":"c015c1e9605d339c8830cb934256764a377e0d20c891247b00fc6391552f22b7"} Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.621461 4800 status_manager.go:851] "Failed to get status for pod" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.622208 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.623701 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.623940 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.624206 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.624506 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.627689 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.628154 4800 status_manager.go:851] "Failed to get status for pod" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" pod="openshift-marketplace/certified-operators-7bnkb" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-7bnkb\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.628394 4800 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.628660 4800 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.629010 4800 status_manager.go:851] "Failed to get status for pod" podUID="16615745-a673-44e3-8cd7-980d59c421ad" pod="openshift-marketplace/certified-operators-4mb2k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-4mb2k\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.631159 4800 status_manager.go:851] "Failed to get status for pod" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-hvg6z\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.631785 4800 status_manager.go:851] "Failed to get status for pod" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.632236 4800 status_manager.go:851] "Failed to get status for pod" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" pod="openshift-marketplace/community-operators-8clxk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8clxk\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.632495 4800 status_manager.go:851] "Failed to get status for pod" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" pod="openshift-marketplace/redhat-marketplace-xgrss" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xgrss\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.632741 4800 status_manager.go:851] "Failed to get status for pod" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" pod="openshift-marketplace/community-operators-6td7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6td7n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.633074 4800 status_manager.go:851] "Failed to get status for pod" 
podUID="79762218-0d90-43f8-a512-a9b95dd3486e" pod="openshift-marketplace/redhat-marketplace-fx96n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-fx96n\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.633370 4800 status_manager.go:851] "Failed to get status for pod" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" pod="openshift-marketplace/redhat-operators-hpkx7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-hpkx7\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.633817 4800 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:21 crc kubenswrapper[4800]: I1125 15:21:21.634184 4800 status_manager.go:851] "Failed to get status for pod" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" pod="openshift-marketplace/redhat-operators-8b5b9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8b5b9\": dial tcp 38.102.83.145:6443: connect: connection refused" Nov 25 15:21:22 crc kubenswrapper[4800]: I1125 15:21:22.631155 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgrss" event={"ID":"40b1358b-2b78-4d92-8e03-baf11a6aecde","Type":"ContainerStarted","Data":"ae319b11dc4821c31f81de0afc767dc5b3810f3501512efe816d44d3bb85113f"} Nov 25 15:21:22 crc kubenswrapper[4800]: I1125 15:21:22.634915 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5aeef1fc22637df3dca82888f09dfa2cd501dc818eb0bbf249fc77d54fe6ac8e"} Nov 25 15:21:22 crc kubenswrapper[4800]: I1125 15:21:22.634945 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"174f7dadc09b13a697a27385e4fc1f6177dffe97147d38a470847166219d7d8d"} Nov 25 15:21:22 crc kubenswrapper[4800]: I1125 15:21:22.634954 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5ab1c2ef249e52fdda34a7a1b80f1a55f82d309cd3e07a9ea15373d156c19e9e"} Nov 25 15:21:22 crc kubenswrapper[4800]: I1125 15:21:22.639424 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hpkx7" event={"ID":"1b030df0-0b5c-4854-bdaf-6b61067bed50","Type":"ContainerStarted","Data":"8d28ee96a3006ec0a5807361e3dfc85f38ca7627ecad4762dac1e4ab865c9f19"} Nov 25 15:21:23 crc kubenswrapper[4800]: I1125 15:21:23.396352 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 15:21:23 crc kubenswrapper[4800]: I1125 15:21:23.650265 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"46f3d4ae882ce85bb912058aa8e9e343277d1184edf84be73fb9294aa2889a8f"} Nov 25 15:21:23 crc kubenswrapper[4800]: I1125 15:21:23.650308 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"24794001fe1da7d4567d81c743b65a9fecf2360f9444de509ac45cf3185698bb"} Nov 25 15:21:23 crc kubenswrapper[4800]: I1125 15:21:23.650506 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:23 crc kubenswrapper[4800]: I1125 15:21:23.650553 4800 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125" Nov 25 15:21:23 crc kubenswrapper[4800]: I1125 15:21:23.650566 4800 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125" Nov 25 15:21:25 crc kubenswrapper[4800]: I1125 15:21:25.629903 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:21:26 crc kubenswrapper[4800]: I1125 15:21:26.167934 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:21:26 crc kubenswrapper[4800]: I1125 15:21:26.168290 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:21:26 crc kubenswrapper[4800]: I1125 15:21:26.214540 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:21:26 crc kubenswrapper[4800]: I1125 15:21:26.706174 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:21:27 crc kubenswrapper[4800]: I1125 15:21:27.564539 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:21:27 crc kubenswrapper[4800]: I1125 15:21:27.564590 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:21:27 crc kubenswrapper[4800]: I1125 15:21:27.606900 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:21:27 crc kubenswrapper[4800]: I1125 15:21:27.702485 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:21:27 crc kubenswrapper[4800]: I1125 15:21:27.813859 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:27 crc kubenswrapper[4800]: I1125 15:21:27.813899 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:27 crc kubenswrapper[4800]: I1125 15:21:27.819063 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.002724 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:21:28 crc 
kubenswrapper[4800]: I1125 15:21:28.002796 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.045939 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.680487 4800 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.733980 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.921639 4800 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c415a991-d806-441d-afdf-0cdb17d5f3bf" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.945261 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.945317 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:21:28 crc kubenswrapper[4800]: I1125 15:21:28.987259 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:21:29 crc kubenswrapper[4800]: I1125 15:21:29.419732 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:21:29 crc kubenswrapper[4800]: I1125 15:21:29.461352 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:21:29 crc kubenswrapper[4800]: I1125 15:21:29.691167 4800 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125" Nov 25 15:21:29 crc kubenswrapper[4800]: I1125 15:21:29.691198 4800 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="511c09cb-7edd-4195-bc55-233f51435125" Nov 25 15:21:29 crc kubenswrapper[4800]: I1125 15:21:29.698137 4800 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c415a991-d806-441d-afdf-0cdb17d5f3bf" Nov 25 15:21:29 crc kubenswrapper[4800]: I1125 15:21:29.738982 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:21:54 crc kubenswrapper[4800]: I1125 15:21:54.851052 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 15:21:55 crc kubenswrapper[4800]: I1125 15:21:55.458143 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 15:21:55 crc kubenswrapper[4800]: I1125 15:21:55.656058 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 15:21:56 crc kubenswrapper[4800]: I1125 15:21:56.012835 4800 reflector.go:368] Caches populated for *v1.ConfigMap from 
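
Once the API server is reachable again, the long run of reflector lines that follows ("Caches populated for *v1.Secret from object-...") marks each list-and-watch cache finishing its initial sync. A minimal client-go sketch of one such informer; the namespace and resync interval are illustrative, and the kubelet's own informers are wired differently (see the k8s.io/client-go/informers/factory.go:160 references in the log):

// informer_sketch.go — the list/watch machinery behind the "Caches populated" lines.
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One shared factory per client; each Informer() call adds a reflector.
	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute, informers.WithNamespace("openshift-console"))
	secrets := factory.Core().V1().Secrets().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// The moment WaitForCacheSync returns true corresponds to a
	// "Caches populated for *v1.Secret ..." log line.
	if cache.WaitForCacheSync(stop, secrets.HasSynced) {
		fmt.Println("caches populated for *v1.Secret")
	}
}
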
object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 15:21:56 crc kubenswrapper[4800]: I1125 15:21:56.028183 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 15:21:56 crc kubenswrapper[4800]: I1125 15:21:56.311509 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 15:21:57 crc kubenswrapper[4800]: I1125 15:21:57.511410 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 15:21:57 crc kubenswrapper[4800]: I1125 15:21:57.739545 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 15:21:58 crc kubenswrapper[4800]: I1125 15:21:58.720029 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 15:21:58 crc kubenswrapper[4800]: I1125 15:21:58.880776 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 15:21:58 crc kubenswrapper[4800]: I1125 15:21:58.956921 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 15:21:59 crc kubenswrapper[4800]: I1125 15:21:59.123662 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 15:21:59 crc kubenswrapper[4800]: I1125 15:21:59.273253 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 15:21:59 crc kubenswrapper[4800]: I1125 15:21:59.398831 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 15:21:59 crc kubenswrapper[4800]: I1125 15:21:59.478964 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 15:21:59 crc kubenswrapper[4800]: I1125 15:21:59.690649 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.022646 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.024541 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.132374 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.182123 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.371054 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.406059 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.439188 4800 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.541913 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.594748 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.728901 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 15:22:00 crc kubenswrapper[4800]: I1125 15:22:00.835985 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.010068 4800 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.105726 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.135868 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.147999 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.150463 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.151192 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.265208 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.328741 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.355564 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.796762 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.800131 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.844014 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 15:22:01 crc kubenswrapper[4800]: I1125 15:22:01.855911 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 15:22:02 crc kubenswrapper[4800]: I1125 15:22:02.016608 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 15:22:02 crc kubenswrapper[4800]: I1125 
15:22:02.089221 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 15:22:02 crc kubenswrapper[4800]: I1125 15:22:02.263715 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 15:22:02 crc kubenswrapper[4800]: I1125 15:22:02.299187 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 15:22:02 crc kubenswrapper[4800]: I1125 15:22:02.478103 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 15:22:02 crc kubenswrapper[4800]: I1125 15:22:02.771659 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 15:22:02 crc kubenswrapper[4800]: I1125 15:22:02.978808 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.130621 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.225655 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.333277 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.339308 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.448434 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.453616 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.547027 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.619036 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.637132 4800 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.890022 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.949737 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 15:22:03 crc kubenswrapper[4800]: I1125 15:22:03.959339 4800 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.096168 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.179409 4800 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-network-operator"/"metrics-tls" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.293404 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.647240 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.655992 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.697725 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.730598 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.752205 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.853942 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.870566 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.883041 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.940675 4800 generic.go:334] "Generic (PLEG): container finished" podID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerID="ec5f464cf9cb45a8afe51ceba8d7bab6a53bbc928fd8e3e2911062e029bd7820" exitCode=0 Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.940733 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" event={"ID":"c50787f1-b3aa-49be-adc2-610beeeede6d","Type":"ContainerDied","Data":"ec5f464cf9cb45a8afe51ceba8d7bab6a53bbc928fd8e3e2911062e029bd7820"} Nov 25 15:22:04 crc kubenswrapper[4800]: I1125 15:22:04.941394 4800 scope.go:117] "RemoveContainer" containerID="ec5f464cf9cb45a8afe51ceba8d7bab6a53bbc928fd8e3e2911062e029bd7820" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.204797 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.469922 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.471537 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.654212 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.934749 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.973050 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-5h48t_c50787f1-b3aa-49be-adc2-610beeeede6d/marketplace-operator/1.log" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.973831 4800 generic.go:334] "Generic (PLEG): container finished" podID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerID="a30f6cdb64a696f783a529ff121cdeb5e41c8873df0cd1693be7ffa61d861173" exitCode=1 Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.973964 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" event={"ID":"c50787f1-b3aa-49be-adc2-610beeeede6d","Type":"ContainerDied","Data":"a30f6cdb64a696f783a529ff121cdeb5e41c8873df0cd1693be7ffa61d861173"} Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.974289 4800 scope.go:117] "RemoveContainer" containerID="ec5f464cf9cb45a8afe51ceba8d7bab6a53bbc928fd8e3e2911062e029bd7820" Nov 25 15:22:05 crc kubenswrapper[4800]: I1125 15:22:05.976164 4800 scope.go:117] "RemoveContainer" containerID="a30f6cdb64a696f783a529ff121cdeb5e41c8873df0cd1693be7ffa61d861173" Nov 25 15:22:05 crc kubenswrapper[4800]: E1125 15:22:05.976703 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-5h48t_openshift-marketplace(c50787f1-b3aa-49be-adc2-610beeeede6d)\"" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" Nov 25 15:22:06 crc kubenswrapper[4800]: I1125 15:22:06.168416 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:22:06 crc kubenswrapper[4800]: I1125 15:22:06.169152 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:22:06 crc kubenswrapper[4800]: I1125 15:22:06.457745 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 15:22:06 crc kubenswrapper[4800]: I1125 15:22:06.638778 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 15:22:06 crc kubenswrapper[4800]: I1125 15:22:06.984305 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-5h48t_c50787f1-b3aa-49be-adc2-610beeeede6d/marketplace-operator/1.log" Nov 25 15:22:06 crc kubenswrapper[4800]: I1125 15:22:06.984999 4800 scope.go:117] "RemoveContainer" containerID="a30f6cdb64a696f783a529ff121cdeb5e41c8873df0cd1693be7ffa61d861173" Nov 25 15:22:06 crc kubenswrapper[4800]: E1125 15:22:06.985233 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-5h48t_openshift-marketplace(c50787f1-b3aa-49be-adc2-610beeeede6d)\"" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.132371 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.304984 4800 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.400904 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.502401 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.660497 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.730238 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.954911 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 15:22:07 crc kubenswrapper[4800]: I1125 15:22:07.991717 4800 scope.go:117] "RemoveContainer" containerID="a30f6cdb64a696f783a529ff121cdeb5e41c8873df0cd1693be7ffa61d861173" Nov 25 15:22:07 crc kubenswrapper[4800]: E1125 15:22:07.992226 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-5h48t_openshift-marketplace(c50787f1-b3aa-49be-adc2-610beeeede6d)\"" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.039640 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.063079 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.203793 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.408022 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.493950 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.576423 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.651941 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.847513 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.862241 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.871833 4800 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.874659 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.892429 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.909709 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 15:22:08 crc kubenswrapper[4800]: I1125 15:22:08.967813 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.122256 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.164770 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.231171 4800 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.324917 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.343984 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.372505 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.452002 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.502208 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.791233 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.809872 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 15:22:09 crc kubenswrapper[4800]: I1125 15:22:09.821192 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.069255 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.095009 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.253804 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 15:22:10 crc 
kubenswrapper[4800]: I1125 15:22:10.273010 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.463737 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.628697 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.667175 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.668449 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.743085 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.861166 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.864363 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 15:22:10 crc kubenswrapper[4800]: I1125 15:22:10.901266 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.090533 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.190316 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.224469 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.410357 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.415429 4800 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.416542 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fx96n" podStartSLOduration=52.788464491 podStartE2EDuration="3m4.416519794s" podCreationTimestamp="2025-11-25 15:19:07 +0000 UTC" firstStartedPulling="2025-11-25 15:19:09.442153466 +0000 UTC m=+110.496561948" lastFinishedPulling="2025-11-25 15:21:21.070208729 +0000 UTC m=+242.124617251" observedRunningTime="2025-11-25 15:21:28.870200722 +0000 UTC m=+249.924609224" watchObservedRunningTime="2025-11-25 15:22:11.416519794 +0000 UTC m=+292.470928326" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.418294 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hpkx7" podStartSLOduration=52.362087173 podStartE2EDuration="3m3.4182837s" podCreationTimestamp="2025-11-25 15:19:08 +0000 UTC" firstStartedPulling="2025-11-25 15:19:10.530399315 +0000 UTC 
m=+111.584807787" lastFinishedPulling="2025-11-25 15:21:21.586595822 +0000 UTC m=+242.641004314" observedRunningTime="2025-11-25 15:21:28.997400143 +0000 UTC m=+250.051808635" watchObservedRunningTime="2025-11-25 15:22:11.4182837 +0000 UTC m=+292.472692222" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.419356 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=77.419340499 podStartE2EDuration="1m17.419340499s" podCreationTimestamp="2025-11-25 15:20:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:21:28.746810953 +0000 UTC m=+249.801219455" watchObservedRunningTime="2025-11-25 15:22:11.419340499 +0000 UTC m=+292.473749031" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.420415 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8b5b9" podStartSLOduration=57.660885518 podStartE2EDuration="3m2.420403807s" podCreationTimestamp="2025-11-25 15:19:09 +0000 UTC" firstStartedPulling="2025-11-25 15:19:10.463547098 +0000 UTC m=+111.517955580" lastFinishedPulling="2025-11-25 15:21:15.223065367 +0000 UTC m=+236.277473869" observedRunningTime="2025-11-25 15:21:28.702457349 +0000 UTC m=+249.756865841" watchObservedRunningTime="2025-11-25 15:22:11.420403807 +0000 UTC m=+292.474812329" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.421263 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7bnkb" podStartSLOduration=63.481322327 podStartE2EDuration="3m6.421252019s" podCreationTimestamp="2025-11-25 15:19:05 +0000 UTC" firstStartedPulling="2025-11-25 15:19:07.345411891 +0000 UTC m=+108.399820373" lastFinishedPulling="2025-11-25 15:21:10.285341553 +0000 UTC m=+231.339750065" observedRunningTime="2025-11-25 15:21:28.71788125 +0000 UTC m=+249.772289732" watchObservedRunningTime="2025-11-25 15:22:11.421252019 +0000 UTC m=+292.475660541" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.422824 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4mb2k" podStartSLOduration=63.110327429 podStartE2EDuration="3m6.422814391s" podCreationTimestamp="2025-11-25 15:19:05 +0000 UTC" firstStartedPulling="2025-11-25 15:19:06.27353955 +0000 UTC m=+107.327948032" lastFinishedPulling="2025-11-25 15:21:09.586026512 +0000 UTC m=+230.640434994" observedRunningTime="2025-11-25 15:21:28.760617394 +0000 UTC m=+249.815025876" watchObservedRunningTime="2025-11-25 15:22:11.422814391 +0000 UTC m=+292.477222913" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.424109 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6td7n" podStartSLOduration=64.518046351 podStartE2EDuration="3m6.424098654s" podCreationTimestamp="2025-11-25 15:19:05 +0000 UTC" firstStartedPulling="2025-11-25 15:19:07.336689587 +0000 UTC m=+108.391098069" lastFinishedPulling="2025-11-25 15:21:09.24274186 +0000 UTC m=+230.297150372" observedRunningTime="2025-11-25 15:21:28.850656244 +0000 UTC m=+249.905064726" watchObservedRunningTime="2025-11-25 15:22:11.424098654 +0000 UTC m=+292.478507186" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.424301 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
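
Each "Observed pod startup duration" record above carries two durations: podStartE2EDuration spans podCreationTimestamp to watchObservedRunningTime, while podStartSLOduration additionally excludes the image-pull window (lastFinishedPulling minus firstStartedPulling). A sketch that reconstructs the redhat-marketplace-fx96n numbers from the timestamps in the log; sub-microsecond drift against the logged SLO value is expected, since the tracker works off the monotonic m=+ offsets rather than the wall-clock strings:

// latency_sketch.go — recomputing podStartE2EDuration and podStartSLOduration.
package main

import (
	"fmt"
	"time"
)

// mustParse reads the Go time.Time string format used in the log lines.
func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-25 15:19:07 +0000 UTC")
	pullStart := mustParse("2025-11-25 15:19:09.442153466 +0000 UTC")
	pullEnd := mustParse("2025-11-25 15:21:21.070208729 +0000 UTC")
	observed := mustParse("2025-11-25 15:22:11.416519794 +0000 UTC")

	e2e := observed.Sub(created)
	slo := e2e - pullEnd.Sub(pullStart) // subtract the image-pull window
	fmt.Println("podStartE2EDuration:", e2e) // 3m4.416519794s, as logged
	fmt.Println("podStartSLOduration:", slo) // ~52.788s, matching podStartSLOduration
}
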
pod="openshift-marketplace/community-operators-8clxk" podStartSLOduration=57.89106155 podStartE2EDuration="3m6.42429071s" podCreationTimestamp="2025-11-25 15:19:05 +0000 UTC" firstStartedPulling="2025-11-25 15:19:07.341653918 +0000 UTC m=+108.396062400" lastFinishedPulling="2025-11-25 15:21:15.874883088 +0000 UTC m=+236.929291560" observedRunningTime="2025-11-25 15:21:28.813495473 +0000 UTC m=+249.867903955" watchObservedRunningTime="2025-11-25 15:22:11.42429071 +0000 UTC m=+292.478699272" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.424495 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xgrss" podStartSLOduration=52.303717038 podStartE2EDuration="3m4.424483046s" podCreationTimestamp="2025-11-25 15:19:07 +0000 UTC" firstStartedPulling="2025-11-25 15:19:09.44271034 +0000 UTC m=+110.497118822" lastFinishedPulling="2025-11-25 15:21:21.563476348 +0000 UTC m=+242.617884830" observedRunningTime="2025-11-25 15:21:28.829520217 +0000 UTC m=+249.883928699" watchObservedRunningTime="2025-11-25 15:22:11.424483046 +0000 UTC m=+292.478891568" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.425352 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.425427 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.430624 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.450831 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=43.450803294 podStartE2EDuration="43.450803294s" podCreationTimestamp="2025-11-25 15:21:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:22:11.446241572 +0000 UTC m=+292.500650074" watchObservedRunningTime="2025-11-25 15:22:11.450803294 +0000 UTC m=+292.505211776" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.466443 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.693707 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.703738 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.728703 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.868336 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 15:22:11 crc kubenswrapper[4800]: I1125 15:22:11.879465 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 15:22:12.030720 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 
15:22:12.169608 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 15:22:12.232896 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 15:22:12.765161 4800 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 15:22:12.765535 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://74393ed9369462499643a10c6a12d5e2ad4aa32b6841bd461c9bdb77e3bfd5e6" gracePeriod=5 Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 15:22:12.909546 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 15:22:12.955814 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 15:22:12 crc kubenswrapper[4800]: I1125 15:22:12.956163 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 15:22:13 crc kubenswrapper[4800]: I1125 15:22:13.100729 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 15:22:13 crc kubenswrapper[4800]: I1125 15:22:13.436758 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 15:22:13 crc kubenswrapper[4800]: I1125 15:22:13.653374 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 15:22:13 crc kubenswrapper[4800]: I1125 15:22:13.735319 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 15:22:13 crc kubenswrapper[4800]: I1125 15:22:13.761935 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 15:22:13 crc kubenswrapper[4800]: I1125 15:22:13.922947 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.013588 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.150648 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.235927 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.378831 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.463303 4800 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.492712 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.633837 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.763917 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.764095 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.778533 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 15:22:14 crc kubenswrapper[4800]: I1125 15:22:14.814740 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.098782 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.098934 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.193662 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.425954 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.433525 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.490883 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.502807 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.530540 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.648408 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.674266 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.744906 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.875651 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 15:22:15 crc kubenswrapper[4800]: I1125 15:22:15.952537 4800 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.166178 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.266261 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.278151 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.374000 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.467906 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.657272 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.706728 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.729318 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.813441 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.832710 4800 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 15:22:16 crc kubenswrapper[4800]: I1125 15:22:16.947811 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 15:22:17 crc kubenswrapper[4800]: I1125 15:22:17.089824 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 15:22:17 crc kubenswrapper[4800]: I1125 15:22:17.645451 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 15:22:17 crc kubenswrapper[4800]: I1125 15:22:17.771562 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 15:22:17 crc kubenswrapper[4800]: I1125 15:22:17.832774 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.057215 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.057271 4800 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="74393ed9369462499643a10c6a12d5e2ad4aa32b6841bd461c9bdb77e3bfd5e6" exitCode=137 Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.357048 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.357404 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.402389 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.403276 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477240 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477340 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477387 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477461 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477502 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477520 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477571 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477585 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). 
InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.477614 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.478221 4800 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.478250 4800 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.478263 4800 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.478273 4800 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.488160 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.579788 4800 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.609932 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.634470 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.639069 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.672514 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.767180 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.797884 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.825785 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.879375 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.898377 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.901599 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 15:22:18 crc kubenswrapper[4800]: I1125 15:22:18.952653 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.014055 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.063646 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.063715 4800 scope.go:117] "RemoveContainer" containerID="74393ed9369462499643a10c6a12d5e2ad4aa32b6841bd461c9bdb77e3bfd5e6" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.063947 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.151698 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.156921 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.664576 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.744418 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.794464 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.794708 4800 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.809541 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.809601 4800 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="71ade9da-c12c-424c-98cd-8ed2874eca69" Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.817049 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 15:22:19 crc kubenswrapper[4800]: I1125 15:22:19.817149 4800 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="71ade9da-c12c-424c-98cd-8ed2874eca69" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.271735 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.276287 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.303138 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.337524 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.426163 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.473480 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.492180 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.519359 4800 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.612982 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.787008 4800 scope.go:117] "RemoveContainer" containerID="a30f6cdb64a696f783a529ff121cdeb5e41c8873df0cd1693be7ffa61d861173" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.839255 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 15:22:20 crc kubenswrapper[4800]: I1125 15:22:20.993065 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 15:22:21 crc kubenswrapper[4800]: I1125 15:22:21.114016 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 15:22:21 crc kubenswrapper[4800]: I1125 15:22:21.224862 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 15:22:21 crc kubenswrapper[4800]: I1125 15:22:21.274786 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 15:22:21 crc kubenswrapper[4800]: I1125 15:22:21.663010 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 15:22:21 crc kubenswrapper[4800]: I1125 15:22:21.851806 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.085768 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-5h48t_c50787f1-b3aa-49be-adc2-610beeeede6d/marketplace-operator/1.log" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.085877 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" event={"ID":"c50787f1-b3aa-49be-adc2-610beeeede6d","Type":"ContainerStarted","Data":"07c4fa3b7db534e9673f1d9350b7e832d3595607c5be7a5f9e3ad5a8586326e4"} Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.086416 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.091234 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.134377 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.241224 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.261456 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.607872 4800 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.624228 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.655590 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.802827 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 15:22:22 crc kubenswrapper[4800]: I1125 15:22:22.988326 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 15:22:23 crc kubenswrapper[4800]: I1125 15:22:23.189312 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 15:22:23 crc kubenswrapper[4800]: I1125 15:22:23.212222 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 15:22:23 crc kubenswrapper[4800]: I1125 15:22:23.383348 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 15:22:23 crc kubenswrapper[4800]: I1125 15:22:23.860215 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 15:22:23 crc kubenswrapper[4800]: I1125 15:22:23.964271 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 15:22:24 crc kubenswrapper[4800]: I1125 15:22:24.146622 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 15:22:24 crc kubenswrapper[4800]: I1125 15:22:24.494627 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 15:22:24 crc kubenswrapper[4800]: I1125 15:22:24.497101 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 15:22:24 crc kubenswrapper[4800]: I1125 15:22:24.538641 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 15:22:24 crc kubenswrapper[4800]: I1125 15:22:24.778240 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 15:22:24 crc kubenswrapper[4800]: I1125 15:22:24.825955 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 15:22:24 crc kubenswrapper[4800]: I1125 15:22:24.965383 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 15:22:25 crc kubenswrapper[4800]: I1125 15:22:25.010522 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 15:22:25 crc kubenswrapper[4800]: I1125 15:22:25.085980 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 15:22:25 crc kubenswrapper[4800]: I1125 15:22:25.601410 4800 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 15:22:25 crc kubenswrapper[4800]: I1125 15:22:25.603467 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 15:22:26 crc kubenswrapper[4800]: I1125 15:22:26.000409 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 15:22:26 crc kubenswrapper[4800]: I1125 15:22:26.944068 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 15:22:26 crc kubenswrapper[4800]: I1125 15:22:26.974376 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 15:22:27 crc kubenswrapper[4800]: I1125 15:22:27.123778 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 15:22:27 crc kubenswrapper[4800]: I1125 15:22:27.240323 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 15:22:27 crc kubenswrapper[4800]: I1125 15:22:27.299961 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 15:22:27 crc kubenswrapper[4800]: I1125 15:22:27.912369 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 15:22:28 crc kubenswrapper[4800]: I1125 15:22:28.133489 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 15:22:28 crc kubenswrapper[4800]: I1125 15:22:28.208343 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 15:22:29 crc kubenswrapper[4800]: I1125 15:22:29.726017 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 15:22:29 crc kubenswrapper[4800]: I1125 15:22:29.729259 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 15:22:29 crc kubenswrapper[4800]: I1125 15:22:29.859162 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 15:23:01 crc kubenswrapper[4800]: I1125 15:23:01.659962 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mtjz"] Nov 25 15:23:01 crc kubenswrapper[4800]: I1125 15:23:01.661380 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" podUID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" containerName="controller-manager" containerID="cri-o://f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689" gracePeriod=30 Nov 25 15:23:01 crc kubenswrapper[4800]: I1125 15:23:01.757708 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24"] Nov 25 15:23:01 crc kubenswrapper[4800]: I1125 15:23:01.757941 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" podUID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" containerName="route-controller-manager" 
containerID="cri-o://6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112" gracePeriod=30 Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.052779 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.100975 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtlw2\" (UniqueName: \"kubernetes.io/projected/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-kube-api-access-qtlw2\") pod \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.101050 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-proxy-ca-bundles\") pod \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.101090 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-serving-cert\") pod \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.101142 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-client-ca\") pod \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.101292 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-config\") pod \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\" (UID: \"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.103248 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-client-ca" (OuterVolumeSpecName: "client-ca") pod "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" (UID: "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.103776 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" (UID: "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.103994 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-config" (OuterVolumeSpecName: "config") pod "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" (UID: "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.110219 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-kube-api-access-qtlw2" (OuterVolumeSpecName: "kube-api-access-qtlw2") pod "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" (UID: "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b"). InnerVolumeSpecName "kube-api-access-qtlw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.110321 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" (UID: "ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.115616 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203030 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqxl2\" (UniqueName: \"kubernetes.io/projected/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-kube-api-access-xqxl2\") pod \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203182 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-client-ca\") pod \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203220 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-config\") pod \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203259 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-serving-cert\") pod \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\" (UID: \"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960\") " Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203550 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203568 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtlw2\" (UniqueName: \"kubernetes.io/projected/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-kube-api-access-qtlw2\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203578 4800 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203588 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.203597 4800 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.204344 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-client-ca" (OuterVolumeSpecName: "client-ca") pod "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" (UID: "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.204470 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-config" (OuterVolumeSpecName: "config") pod "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" (UID: "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.208548 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" (UID: "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.208660 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-kube-api-access-xqxl2" (OuterVolumeSpecName: "kube-api-access-xqxl2") pod "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" (UID: "09e6bcd9-e9bf-408d-9a27-e3d2b7b29960"). InnerVolumeSpecName "kube-api-access-xqxl2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.305320 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqxl2\" (UniqueName: \"kubernetes.io/projected/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-kube-api-access-xqxl2\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.305399 4800 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.305424 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.305442 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.332648 4800 generic.go:334] "Generic (PLEG): container finished" podID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" containerID="6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112" exitCode=0 Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.332726 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" event={"ID":"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960","Type":"ContainerDied","Data":"6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112"} Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.332740 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.332755 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24" event={"ID":"09e6bcd9-e9bf-408d-9a27-e3d2b7b29960","Type":"ContainerDied","Data":"ce70bf90006edd41b4dd58d337bc75659c81293dffd4e892e0223efec413d10a"} Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.332808 4800 scope.go:117] "RemoveContainer" containerID="6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.336244 4800 generic.go:334] "Generic (PLEG): container finished" podID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" containerID="f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689" exitCode=0 Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.336290 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" event={"ID":"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b","Type":"ContainerDied","Data":"f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689"} Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.336324 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" event={"ID":"ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b","Type":"ContainerDied","Data":"93cd5b16ed5477ce78133735bd4abf67dc9ae219d4fe51f001799d5bcf8b9832"} Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.336426 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mtjz" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.373313 4800 scope.go:117] "RemoveContainer" containerID="6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.373515 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24"] Nov 25 15:23:02 crc kubenswrapper[4800]: E1125 15:23:02.373940 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112\": container with ID starting with 6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112 not found: ID does not exist" containerID="6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.373975 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112"} err="failed to get container status \"6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112\": rpc error: code = NotFound desc = could not find container \"6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112\": container with ID starting with 6905b8807f41ea1c61439b6bb823eb4b7784e1992e7d8e5011e6dc9e06f88112 not found: ID does not exist" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.374002 4800 scope.go:117] "RemoveContainer" containerID="f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.382702 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2nn24"] Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.390795 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mtjz"] Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.396119 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mtjz"] Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.401523 4800 scope.go:117] "RemoveContainer" containerID="f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689" Nov 25 15:23:02 crc kubenswrapper[4800]: E1125 15:23:02.402246 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689\": container with ID starting with f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689 not found: ID does not exist" containerID="f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.402287 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689"} err="failed to get container status \"f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689\": rpc error: code = NotFound desc = could not find container \"f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689\": container with ID starting with f990451c685118c8f74447dd85b72fb17fdc7fbbea0b8a739f63b9a83c034689 not found: ID does not exist" Nov 25 15:23:02 crc 
kubenswrapper[4800]: I1125 15:23:02.738293 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58c8c7d596-9j29p"] Nov 25 15:23:02 crc kubenswrapper[4800]: E1125 15:23:02.738668 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.738688 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 15:23:02 crc kubenswrapper[4800]: E1125 15:23:02.738711 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" containerName="installer" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.738720 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" containerName="installer" Nov 25 15:23:02 crc kubenswrapper[4800]: E1125 15:23:02.738733 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" containerName="route-controller-manager" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.738742 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" containerName="route-controller-manager" Nov 25 15:23:02 crc kubenswrapper[4800]: E1125 15:23:02.738767 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" containerName="controller-manager" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.738776 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" containerName="controller-manager" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.738954 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.738978 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" containerName="controller-manager" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.738992 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="2af2b622-17be-49af-8adc-4ba183fb2e99" containerName="installer" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.739005 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" containerName="route-controller-manager" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.739654 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.742298 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.743531 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.743861 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.744399 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.747711 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.747713 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58c8c7d596-9j29p"] Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.750814 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.755410 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.814429 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-config\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.815033 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-proxy-ca-bundles\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.815083 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-client-ca\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.815120 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-serving-cert\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.815379 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59g9z\" (UniqueName: 
\"kubernetes.io/projected/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-kube-api-access-59g9z\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.917110 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-client-ca\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.917174 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-serving-cert\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.917242 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59g9z\" (UniqueName: \"kubernetes.io/projected/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-kube-api-access-59g9z\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.917274 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-config\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.917331 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-proxy-ca-bundles\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.918764 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-client-ca\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.919624 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-proxy-ca-bundles\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.920680 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-config\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " 
pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.925981 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-serving-cert\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.939755 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59g9z\" (UniqueName: \"kubernetes.io/projected/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-kube-api-access-59g9z\") pod \"controller-manager-58c8c7d596-9j29p\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.969569 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p"] Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.971765 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.976356 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.977054 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.977168 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.977345 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.977441 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.978982 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 15:23:02 crc kubenswrapper[4800]: I1125 15:23:02.982339 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p"] Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.019349 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f323cd5f-a443-4196-bc2b-f59c26f6df87-config\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.019436 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z27h9\" (UniqueName: \"kubernetes.io/projected/f323cd5f-a443-4196-bc2b-f59c26f6df87-kube-api-access-z27h9\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " 
pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.019532 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f323cd5f-a443-4196-bc2b-f59c26f6df87-client-ca\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.019570 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f323cd5f-a443-4196-bc2b-f59c26f6df87-serving-cert\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.057195 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.120777 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f323cd5f-a443-4196-bc2b-f59c26f6df87-config\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.120869 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z27h9\" (UniqueName: \"kubernetes.io/projected/f323cd5f-a443-4196-bc2b-f59c26f6df87-kube-api-access-z27h9\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.120936 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f323cd5f-a443-4196-bc2b-f59c26f6df87-client-ca\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.120974 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f323cd5f-a443-4196-bc2b-f59c26f6df87-serving-cert\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.122435 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f323cd5f-a443-4196-bc2b-f59c26f6df87-config\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.125017 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/f323cd5f-a443-4196-bc2b-f59c26f6df87-client-ca\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.126579 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f323cd5f-a443-4196-bc2b-f59c26f6df87-serving-cert\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.155778 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z27h9\" (UniqueName: \"kubernetes.io/projected/f323cd5f-a443-4196-bc2b-f59c26f6df87-kube-api-access-z27h9\") pod \"route-controller-manager-84d79cd4d-89b4p\" (UID: \"f323cd5f-a443-4196-bc2b-f59c26f6df87\") " pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.294382 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.314959 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58c8c7d596-9j29p"] Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.490945 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p"] Nov 25 15:23:03 crc kubenswrapper[4800]: W1125 15:23:03.498440 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf323cd5f_a443_4196_bc2b_f59c26f6df87.slice/crio-abbf53bba713e01322600403b278ec47e22586ace0ab45ac1c106d29b6407c0d WatchSource:0}: Error finding container abbf53bba713e01322600403b278ec47e22586ace0ab45ac1c106d29b6407c0d: Status 404 returned error can't find the container with id abbf53bba713e01322600403b278ec47e22586ace0ab45ac1c106d29b6407c0d Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.792560 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09e6bcd9-e9bf-408d-9a27-e3d2b7b29960" path="/var/lib/kubelet/pods/09e6bcd9-e9bf-408d-9a27-e3d2b7b29960/volumes" Nov 25 15:23:03 crc kubenswrapper[4800]: I1125 15:23:03.794668 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b" path="/var/lib/kubelet/pods/ba82e93a-7f0f-4541-8cfd-3ba993ee4f7b/volumes" Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.390508 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" event={"ID":"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a","Type":"ContainerStarted","Data":"c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc"} Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.391434 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" event={"ID":"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a","Type":"ContainerStarted","Data":"c0068b4aff82be1adb5a96c254c6269599c0fcb9c76ab01ee787a80615cfc416"} Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.391579 4800 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.392995 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" event={"ID":"f323cd5f-a443-4196-bc2b-f59c26f6df87","Type":"ContainerStarted","Data":"8394efbdd2a2372a1c51e8a19c89236d081235ed126c36d977bd8ce3e634803d"} Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.393022 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" event={"ID":"f323cd5f-a443-4196-bc2b-f59c26f6df87","Type":"ContainerStarted","Data":"abbf53bba713e01322600403b278ec47e22586ace0ab45ac1c106d29b6407c0d"} Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.393254 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.397522 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.397715 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" Nov 25 15:23:04 crc kubenswrapper[4800]: I1125 15:23:04.410193 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" podStartSLOduration=2.410174824 podStartE2EDuration="2.410174824s" podCreationTimestamp="2025-11-25 15:23:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:23:04.408898178 +0000 UTC m=+345.463306670" watchObservedRunningTime="2025-11-25 15:23:04.410174824 +0000 UTC m=+345.464583306" Nov 25 15:23:05 crc kubenswrapper[4800]: I1125 15:23:05.585593 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-84d79cd4d-89b4p" podStartSLOduration=4.585564125 podStartE2EDuration="4.585564125s" podCreationTimestamp="2025-11-25 15:23:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:23:04.451595919 +0000 UTC m=+345.506004401" watchObservedRunningTime="2025-11-25 15:23:05.585564125 +0000 UTC m=+346.639972637" Nov 25 15:23:05 crc kubenswrapper[4800]: I1125 15:23:05.594367 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8clxk"] Nov 25 15:23:05 crc kubenswrapper[4800]: I1125 15:23:05.594926 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8clxk" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="registry-server" containerID="cri-o://e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112" gracePeriod=2 Nov 25 15:23:05 crc kubenswrapper[4800]: I1125 15:23:05.796967 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7bnkb"] Nov 25 15:23:05 crc kubenswrapper[4800]: I1125 15:23:05.797577 4800 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-7bnkb" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="registry-server" containerID="cri-o://24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" gracePeriod=2 Nov 25 15:23:05 crc kubenswrapper[4800]: E1125 15:23:05.971621 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a is running failed: container process not found" containerID="24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 15:23:05 crc kubenswrapper[4800]: E1125 15:23:05.972163 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a is running failed: container process not found" containerID="24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 15:23:05 crc kubenswrapper[4800]: E1125 15:23:05.972537 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a is running failed: container process not found" containerID="24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 15:23:05 crc kubenswrapper[4800]: E1125 15:23:05.972591 4800 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-7bnkb" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="registry-server" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.003427 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.067080 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-utilities\") pod \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.067297 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9gkv\" (UniqueName: \"kubernetes.io/projected/00b2c35b-aea8-40f1-af86-ab2ca005e90c-kube-api-access-w9gkv\") pod \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.067352 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-catalog-content\") pod \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\" (UID: \"00b2c35b-aea8-40f1-af86-ab2ca005e90c\") " Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.068414 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-utilities" (OuterVolumeSpecName: "utilities") pod "00b2c35b-aea8-40f1-af86-ab2ca005e90c" (UID: "00b2c35b-aea8-40f1-af86-ab2ca005e90c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.084105 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00b2c35b-aea8-40f1-af86-ab2ca005e90c-kube-api-access-w9gkv" (OuterVolumeSpecName: "kube-api-access-w9gkv") pod "00b2c35b-aea8-40f1-af86-ab2ca005e90c" (UID: "00b2c35b-aea8-40f1-af86-ab2ca005e90c"). InnerVolumeSpecName "kube-api-access-w9gkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.142390 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00b2c35b-aea8-40f1-af86-ab2ca005e90c" (UID: "00b2c35b-aea8-40f1-af86-ab2ca005e90c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.170749 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9gkv\" (UniqueName: \"kubernetes.io/projected/00b2c35b-aea8-40f1-af86-ab2ca005e90c-kube-api-access-w9gkv\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.170805 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.170815 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b2c35b-aea8-40f1-af86-ab2ca005e90c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.199573 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.272075 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-utilities\") pod \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.272184 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tq67\" (UniqueName: \"kubernetes.io/projected/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-kube-api-access-8tq67\") pod \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.272231 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-catalog-content\") pod \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\" (UID: \"5242e24a-a670-4da1-ad3c-4b13d7b84b6d\") " Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.273237 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-utilities" (OuterVolumeSpecName: "utilities") pod "5242e24a-a670-4da1-ad3c-4b13d7b84b6d" (UID: "5242e24a-a670-4da1-ad3c-4b13d7b84b6d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.279455 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-kube-api-access-8tq67" (OuterVolumeSpecName: "kube-api-access-8tq67") pod "5242e24a-a670-4da1-ad3c-4b13d7b84b6d" (UID: "5242e24a-a670-4da1-ad3c-4b13d7b84b6d"). InnerVolumeSpecName "kube-api-access-8tq67". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.331096 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5242e24a-a670-4da1-ad3c-4b13d7b84b6d" (UID: "5242e24a-a670-4da1-ad3c-4b13d7b84b6d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.374370 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tq67\" (UniqueName: \"kubernetes.io/projected/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-kube-api-access-8tq67\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.374424 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.374436 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5242e24a-a670-4da1-ad3c-4b13d7b84b6d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.419061 4800 generic.go:334] "Generic (PLEG): container finished" podID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerID="e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112" exitCode=0 Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.419455 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8clxk" event={"ID":"00b2c35b-aea8-40f1-af86-ab2ca005e90c","Type":"ContainerDied","Data":"e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112"} Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.419543 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8clxk" event={"ID":"00b2c35b-aea8-40f1-af86-ab2ca005e90c","Type":"ContainerDied","Data":"dbe4c339fd2d7d510f30785295cefd698cf3f0ec53c3c7ccdc3d67ca5cabb22e"} Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.419588 4800 scope.go:117] "RemoveContainer" containerID="e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.419800 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8clxk" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.423917 4800 generic.go:334] "Generic (PLEG): container finished" podID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerID="24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" exitCode=0 Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.424114 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7bnkb" event={"ID":"5242e24a-a670-4da1-ad3c-4b13d7b84b6d","Type":"ContainerDied","Data":"24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a"} Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.424139 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7bnkb" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.424198 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7bnkb" event={"ID":"5242e24a-a670-4da1-ad3c-4b13d7b84b6d","Type":"ContainerDied","Data":"011f0a36cf33578a1c6e8fbd3fbef256535135cd907416883a4b8fe97933aaec"} Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.458238 4800 scope.go:117] "RemoveContainer" containerID="a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.468167 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8clxk"] Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.473088 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8clxk"] Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.484270 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7bnkb"] Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.484491 4800 scope.go:117] "RemoveContainer" containerID="252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.487310 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7bnkb"] Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.513538 4800 scope.go:117] "RemoveContainer" containerID="e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112" Nov 25 15:23:06 crc kubenswrapper[4800]: E1125 15:23:06.514359 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112\": container with ID starting with e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112 not found: ID does not exist" containerID="e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.514489 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112"} err="failed to get container status \"e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112\": rpc error: code = NotFound desc = could not find container \"e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112\": container with ID starting with e0cdf4975d9f9f19da5598397ad1f3c27ed9cd4a7874b34bd62fdd75408a9112 not found: ID does not exist" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.514535 4800 scope.go:117] "RemoveContainer" containerID="a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3" Nov 25 15:23:06 crc kubenswrapper[4800]: E1125 15:23:06.515498 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3\": container with ID starting with a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3 not found: ID does not exist" containerID="a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.515553 4800 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3"} err="failed to get container status \"a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3\": rpc error: code = NotFound desc = could not find container \"a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3\": container with ID starting with a76270452566ed60933d6ada9643e50a015dd390e9fdec8cc463a8d7629493a3 not found: ID does not exist" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.515587 4800 scope.go:117] "RemoveContainer" containerID="252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8" Nov 25 15:23:06 crc kubenswrapper[4800]: E1125 15:23:06.516028 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8\": container with ID starting with 252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8 not found: ID does not exist" containerID="252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.516070 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8"} err="failed to get container status \"252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8\": rpc error: code = NotFound desc = could not find container \"252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8\": container with ID starting with 252ef9b0a5a3ff5eca5204bc9bafd74dfa684f7334a4f13095796a23b4cf11d8 not found: ID does not exist" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.516121 4800 scope.go:117] "RemoveContainer" containerID="24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.541060 4800 scope.go:117] "RemoveContainer" containerID="e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.556898 4800 scope.go:117] "RemoveContainer" containerID="a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.575075 4800 scope.go:117] "RemoveContainer" containerID="24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" Nov 25 15:23:06 crc kubenswrapper[4800]: E1125 15:23:06.575649 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a\": container with ID starting with 24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a not found: ID does not exist" containerID="24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.575721 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a"} err="failed to get container status \"24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a\": rpc error: code = NotFound desc = could not find container \"24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a\": container with ID starting with 24c3538923f6e03a3d2c9219446382083e47e2b53d3a3b5a71dd8ca3a9ccc09a not found: ID does not exist" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.575764 4800 
scope.go:117] "RemoveContainer" containerID="e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303" Nov 25 15:23:06 crc kubenswrapper[4800]: E1125 15:23:06.576566 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303\": container with ID starting with e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303 not found: ID does not exist" containerID="e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.576618 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303"} err="failed to get container status \"e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303\": rpc error: code = NotFound desc = could not find container \"e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303\": container with ID starting with e373feee9c9804bd3082499b0b7fa1fc7fa640aa487f32448da4edf4a9d20303 not found: ID does not exist" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.576659 4800 scope.go:117] "RemoveContainer" containerID="a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df" Nov 25 15:23:06 crc kubenswrapper[4800]: E1125 15:23:06.577095 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df\": container with ID starting with a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df not found: ID does not exist" containerID="a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df" Nov 25 15:23:06 crc kubenswrapper[4800]: I1125 15:23:06.577156 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df"} err="failed to get container status \"a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df\": rpc error: code = NotFound desc = could not find container \"a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df\": container with ID starting with a402e38affe7a59e8302db473684b120a756a5aa9a20fd587b4cfa57a5cdc6df not found: ID does not exist" Nov 25 15:23:07 crc kubenswrapper[4800]: I1125 15:23:07.797624 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" path="/var/lib/kubelet/pods/00b2c35b-aea8-40f1-af86-ab2ca005e90c/volumes" Nov 25 15:23:07 crc kubenswrapper[4800]: I1125 15:23:07.800830 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" path="/var/lib/kubelet/pods/5242e24a-a670-4da1-ad3c-4b13d7b84b6d/volumes" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.188685 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx96n"] Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.189210 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fx96n" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="registry-server" containerID="cri-o://c015c1e9605d339c8830cb934256764a377e0d20c891247b00fc6391552f22b7" gracePeriod=2 Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.388989 4800 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/redhat-operators-8b5b9"] Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.389272 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8b5b9" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="registry-server" containerID="cri-o://b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559" gracePeriod=2 Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.455406 4800 generic.go:334] "Generic (PLEG): container finished" podID="79762218-0d90-43f8-a512-a9b95dd3486e" containerID="c015c1e9605d339c8830cb934256764a377e0d20c891247b00fc6391552f22b7" exitCode=0 Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.455483 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx96n" event={"ID":"79762218-0d90-43f8-a512-a9b95dd3486e","Type":"ContainerDied","Data":"c015c1e9605d339c8830cb934256764a377e0d20c891247b00fc6391552f22b7"} Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.678778 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.705810 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpdxj\" (UniqueName: \"kubernetes.io/projected/79762218-0d90-43f8-a512-a9b95dd3486e-kube-api-access-xpdxj\") pod \"79762218-0d90-43f8-a512-a9b95dd3486e\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.706051 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-catalog-content\") pod \"79762218-0d90-43f8-a512-a9b95dd3486e\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.706084 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-utilities\") pod \"79762218-0d90-43f8-a512-a9b95dd3486e\" (UID: \"79762218-0d90-43f8-a512-a9b95dd3486e\") " Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.706952 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-utilities" (OuterVolumeSpecName: "utilities") pod "79762218-0d90-43f8-a512-a9b95dd3486e" (UID: "79762218-0d90-43f8-a512-a9b95dd3486e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.711664 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79762218-0d90-43f8-a512-a9b95dd3486e-kube-api-access-xpdxj" (OuterVolumeSpecName: "kube-api-access-xpdxj") pod "79762218-0d90-43f8-a512-a9b95dd3486e" (UID: "79762218-0d90-43f8-a512-a9b95dd3486e"). InnerVolumeSpecName "kube-api-access-xpdxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.734635 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79762218-0d90-43f8-a512-a9b95dd3486e" (UID: "79762218-0d90-43f8-a512-a9b95dd3486e"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.807675 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.807728 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79762218-0d90-43f8-a512-a9b95dd3486e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.807789 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpdxj\" (UniqueName: \"kubernetes.io/projected/79762218-0d90-43f8-a512-a9b95dd3486e-kube-api-access-xpdxj\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.842421 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.909227 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfpp5\" (UniqueName: \"kubernetes.io/projected/ac983522-6eeb-4141-a7f4-99e9f6f3b480-kube-api-access-tfpp5\") pod \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.909316 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-utilities\") pod \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.909370 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-catalog-content\") pod \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\" (UID: \"ac983522-6eeb-4141-a7f4-99e9f6f3b480\") " Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.910598 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-utilities" (OuterVolumeSpecName: "utilities") pod "ac983522-6eeb-4141-a7f4-99e9f6f3b480" (UID: "ac983522-6eeb-4141-a7f4-99e9f6f3b480"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:08 crc kubenswrapper[4800]: I1125 15:23:08.913291 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac983522-6eeb-4141-a7f4-99e9f6f3b480-kube-api-access-tfpp5" (OuterVolumeSpecName: "kube-api-access-tfpp5") pod "ac983522-6eeb-4141-a7f4-99e9f6f3b480" (UID: "ac983522-6eeb-4141-a7f4-99e9f6f3b480"). InnerVolumeSpecName "kube-api-access-tfpp5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.010677 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfpp5\" (UniqueName: \"kubernetes.io/projected/ac983522-6eeb-4141-a7f4-99e9f6f3b480-kube-api-access-tfpp5\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.010722 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.022098 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ac983522-6eeb-4141-a7f4-99e9f6f3b480" (UID: "ac983522-6eeb-4141-a7f4-99e9f6f3b480"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.111372 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac983522-6eeb-4141-a7f4-99e9f6f3b480-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.467913 4800 generic.go:334] "Generic (PLEG): container finished" podID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerID="b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559" exitCode=0 Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.467983 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8b5b9" event={"ID":"ac983522-6eeb-4141-a7f4-99e9f6f3b480","Type":"ContainerDied","Data":"b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559"} Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.468018 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8b5b9" event={"ID":"ac983522-6eeb-4141-a7f4-99e9f6f3b480","Type":"ContainerDied","Data":"ae526db6b0c2a79d646b55dc56b93903937123446f1e5d6bdbfcad093f58916d"} Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.468079 4800 scope.go:117] "RemoveContainer" containerID="b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.468186 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8b5b9" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.477395 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx96n" event={"ID":"79762218-0d90-43f8-a512-a9b95dd3486e","Type":"ContainerDied","Data":"6c816a4860a5eaa46939a94776757be4ae9f7c78f41a14fa5f0695ceb1093c84"} Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.477438 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx96n" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.506147 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8b5b9"] Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.511095 4800 scope.go:117] "RemoveContainer" containerID="e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.511585 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8b5b9"] Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.532865 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx96n"] Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.549163 4800 scope.go:117] "RemoveContainer" containerID="3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.549237 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx96n"] Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.569712 4800 scope.go:117] "RemoveContainer" containerID="b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559" Nov 25 15:23:09 crc kubenswrapper[4800]: E1125 15:23:09.570165 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559\": container with ID starting with b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559 not found: ID does not exist" containerID="b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.570198 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559"} err="failed to get container status \"b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559\": rpc error: code = NotFound desc = could not find container \"b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559\": container with ID starting with b92547092f97e571787812982b14efe10a2f472492378b91999d8f24d828a559 not found: ID does not exist" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.570221 4800 scope.go:117] "RemoveContainer" containerID="e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915" Nov 25 15:23:09 crc kubenswrapper[4800]: E1125 15:23:09.570531 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915\": container with ID starting with e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915 not found: ID does not exist" containerID="e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.570557 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915"} err="failed to get container status \"e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915\": rpc error: code = NotFound desc = could not find container \"e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915\": container with ID starting with 
e153f8c280f74a0a15fb88ea9bd24b3e33abe19e1349c67a6e2cc547ad8a1915 not found: ID does not exist" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.570572 4800 scope.go:117] "RemoveContainer" containerID="3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3" Nov 25 15:23:09 crc kubenswrapper[4800]: E1125 15:23:09.570792 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3\": container with ID starting with 3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3 not found: ID does not exist" containerID="3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.570816 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3"} err="failed to get container status \"3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3\": rpc error: code = NotFound desc = could not find container \"3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3\": container with ID starting with 3b98b5622eadd85e59dedc3c3845152a56c6ebe952af5a52062dfc52cad814d3 not found: ID does not exist" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.570828 4800 scope.go:117] "RemoveContainer" containerID="c015c1e9605d339c8830cb934256764a377e0d20c891247b00fc6391552f22b7" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.595513 4800 scope.go:117] "RemoveContainer" containerID="b5b16d9802a93f4f36366080d09ede24053e2c077df74f9dd7dce9a4c692637f" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.617559 4800 scope.go:117] "RemoveContainer" containerID="6af55e537855361244aa4eb3b047458d89ed1cffb2c5a2888fcf3e930705c7a5" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.809467 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" path="/var/lib/kubelet/pods/79762218-0d90-43f8-a512-a9b95dd3486e/volumes" Nov 25 15:23:09 crc kubenswrapper[4800]: I1125 15:23:09.811244 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" path="/var/lib/kubelet/pods/ac983522-6eeb-4141-a7f4-99e9f6f3b480/volumes" Nov 25 15:23:16 crc kubenswrapper[4800]: I1125 15:23:16.854548 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58c8c7d596-9j29p"] Nov 25 15:23:16 crc kubenswrapper[4800]: I1125 15:23:16.855456 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" podUID="2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" containerName="controller-manager" containerID="cri-o://c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc" gracePeriod=30 Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.356809 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.441283 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59g9z\" (UniqueName: \"kubernetes.io/projected/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-kube-api-access-59g9z\") pod \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.441349 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-config\") pod \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.441418 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-client-ca\") pod \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.441445 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-serving-cert\") pod \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.441463 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-proxy-ca-bundles\") pod \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\" (UID: \"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a\") " Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.442364 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" (UID: "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.442374 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-client-ca" (OuterVolumeSpecName: "client-ca") pod "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" (UID: "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.442758 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-config" (OuterVolumeSpecName: "config") pod "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" (UID: "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.453050 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-kube-api-access-59g9z" (OuterVolumeSpecName: "kube-api-access-59g9z") pod "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" (UID: "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a"). InnerVolumeSpecName "kube-api-access-59g9z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.453060 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" (UID: "2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.534291 4800 generic.go:334] "Generic (PLEG): container finished" podID="2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" containerID="c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc" exitCode=0 Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.534332 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" event={"ID":"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a","Type":"ContainerDied","Data":"c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc"} Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.534362 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" event={"ID":"2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a","Type":"ContainerDied","Data":"c0068b4aff82be1adb5a96c254c6269599c0fcb9c76ab01ee787a80615cfc416"} Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.534378 4800 scope.go:117] "RemoveContainer" containerID="c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.534481 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58c8c7d596-9j29p" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.542801 4800 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.542868 4800 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.542888 4800 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.542911 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59g9z\" (UniqueName: \"kubernetes.io/projected/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-kube-api-access-59g9z\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.542930 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.557615 4800 scope.go:117] "RemoveContainer" containerID="c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.558132 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc\": container with ID starting with c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc not found: ID does not exist" containerID="c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.558184 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc"} err="failed to get container status \"c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc\": rpc error: code = NotFound desc = could not find container \"c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc\": container with ID starting with c2062b7f0c901287ecafd789c2008aae16f5c980aeedfacac6c2767812d3cabc not found: ID does not exist" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.575792 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58c8c7d596-9j29p"] Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.581148 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58c8c7d596-9j29p"] Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.808425 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" path="/var/lib/kubelet/pods/2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a/volumes" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978352 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-55bc4dcd67-67wmj"] Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978578 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978592 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978604 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978612 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978625 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978635 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978650 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978657 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978667 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978675 4800 
state_mem.go:107] "Deleted CPUSet assignment" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978689 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978697 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="extract-content" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978708 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978716 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978729 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978737 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978746 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978754 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978764 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" containerName="controller-manager" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978771 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" containerName="controller-manager" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978783 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978792 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978803 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978810 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: E1125 15:23:17.978824 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978834 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="extract-utilities" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.978981 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cc8cb47-cbb0-4419-b21a-b6d6a27a6e9a" containerName="controller-manager" Nov 25 15:23:17 crc 
kubenswrapper[4800]: I1125 15:23:17.978998 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="00b2c35b-aea8-40f1-af86-ab2ca005e90c" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.979007 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac983522-6eeb-4141-a7f4-99e9f6f3b480" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.979020 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5242e24a-a670-4da1-ad3c-4b13d7b84b6d" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.979029 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="79762218-0d90-43f8-a512-a9b95dd3486e" containerName="registry-server" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.979428 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.982961 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.983528 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.984089 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.984193 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.984200 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.984339 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.990138 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 15:23:17 crc kubenswrapper[4800]: I1125 15:23:17.999784 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55bc4dcd67-67wmj"] Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.048130 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74da6170-15bf-41a8-8940-3fbbe9213a60-serving-cert\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.048200 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-proxy-ca-bundles\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.048241 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-client-ca\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.048292 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-config\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.048572 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cj9xh\" (UniqueName: \"kubernetes.io/projected/74da6170-15bf-41a8-8940-3fbbe9213a60-kube-api-access-cj9xh\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.150347 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cj9xh\" (UniqueName: \"kubernetes.io/projected/74da6170-15bf-41a8-8940-3fbbe9213a60-kube-api-access-cj9xh\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.150406 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74da6170-15bf-41a8-8940-3fbbe9213a60-serving-cert\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.150433 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-proxy-ca-bundles\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.150455 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-client-ca\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.150488 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-config\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.152062 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-proxy-ca-bundles\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: 
\"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.152153 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-config\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.152379 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/74da6170-15bf-41a8-8940-3fbbe9213a60-client-ca\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.165983 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74da6170-15bf-41a8-8940-3fbbe9213a60-serving-cert\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.184463 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cj9xh\" (UniqueName: \"kubernetes.io/projected/74da6170-15bf-41a8-8940-3fbbe9213a60-kube-api-access-cj9xh\") pod \"controller-manager-55bc4dcd67-67wmj\" (UID: \"74da6170-15bf-41a8-8940-3fbbe9213a60\") " pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.301264 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:18 crc kubenswrapper[4800]: I1125 15:23:18.698359 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55bc4dcd67-67wmj"] Nov 25 15:23:19 crc kubenswrapper[4800]: I1125 15:23:19.549042 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" event={"ID":"74da6170-15bf-41a8-8940-3fbbe9213a60","Type":"ContainerStarted","Data":"8773d095211a326298ea7c601b19daf870124833cd97909477725c0c6a1a6847"} Nov 25 15:23:19 crc kubenswrapper[4800]: I1125 15:23:19.549439 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" event={"ID":"74da6170-15bf-41a8-8940-3fbbe9213a60","Type":"ContainerStarted","Data":"2031b5f6761fd98a0e072768a984f101ec73736ad06f98022998e59008f97302"} Nov 25 15:23:19 crc kubenswrapper[4800]: I1125 15:23:19.549475 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:19 crc kubenswrapper[4800]: I1125 15:23:19.553504 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" Nov 25 15:23:19 crc kubenswrapper[4800]: I1125 15:23:19.573285 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-55bc4dcd67-67wmj" podStartSLOduration=3.57326532 podStartE2EDuration="3.57326532s" podCreationTimestamp="2025-11-25 15:23:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:23:19.568833079 +0000 UTC m=+360.623241631" watchObservedRunningTime="2025-11-25 15:23:19.57326532 +0000 UTC m=+360.627673813" Nov 25 15:23:26 crc kubenswrapper[4800]: I1125 15:23:26.287757 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r4fbv"] Nov 25 15:23:42 crc kubenswrapper[4800]: I1125 15:23:42.640417 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:23:42 crc kubenswrapper[4800]: I1125 15:23:42.641476 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.320237 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" podUID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" containerName="oauth-openshift" containerID="cri-o://5841d34da310770db993b72b381be6d55f0cda9de946e28d02ba16e49149e8b4" gracePeriod=15 Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.746630 4800 generic.go:334] "Generic (PLEG): container finished" podID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" containerID="5841d34da310770db993b72b381be6d55f0cda9de946e28d02ba16e49149e8b4" exitCode=0 Nov 25 15:23:51 
crc kubenswrapper[4800]: I1125 15:23:51.746693 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" event={"ID":"56f1dabd-4d11-4dc4-9961-efac4124e4a5","Type":"ContainerDied","Data":"5841d34da310770db993b72b381be6d55f0cda9de946e28d02ba16e49149e8b4"} Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.746751 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" event={"ID":"56f1dabd-4d11-4dc4-9961-efac4124e4a5","Type":"ContainerDied","Data":"29b38abc898e9f9f72bde729ce2cbd6637d67e7c62298b21b52ce2691eaaf8ce"} Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.746766 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29b38abc898e9f9f72bde729ce2cbd6637d67e7c62298b21b52ce2691eaaf8ce" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.777687 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.814022 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-b7d5b84cf-pjw76"] Nov 25 15:23:51 crc kubenswrapper[4800]: E1125 15:23:51.814476 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" containerName="oauth-openshift" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.814561 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" containerName="oauth-openshift" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.814791 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" containerName="oauth-openshift" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.815399 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.867526 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-b7d5b84cf-pjw76"] Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.921745 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-error\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.921803 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-serving-cert\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922194 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-idp-0-file-data\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922251 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-trusted-ca-bundle\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922286 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-login\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922315 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-router-certs\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922341 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-policies\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922367 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-cliconfig\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922397 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-ocp-branding-template\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922430 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-service-ca\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922456 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9tqq\" (UniqueName: \"kubernetes.io/projected/56f1dabd-4d11-4dc4-9961-efac4124e4a5-kube-api-access-j9tqq\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922493 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-session\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922540 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-provider-selection\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922562 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-dir\") pod \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\" (UID: \"56f1dabd-4d11-4dc4-9961-efac4124e4a5\") " Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922642 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-router-certs\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922679 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mbdv\" (UniqueName: \"kubernetes.io/projected/0b701ddb-a112-4067-8cfd-59d18f57b301-kube-api-access-5mbdv\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922704 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922725 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-session\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922801 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-error\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922899 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b701ddb-a112-4067-8cfd-59d18f57b301-audit-dir\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.922927 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923051 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-audit-policies\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923102 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-service-ca\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923130 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923162 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-cliconfig\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc 
kubenswrapper[4800]: I1125 15:23:51.923189 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-serving-cert\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923214 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-login\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923244 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923579 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.923708 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.924994 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.927128 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.927124 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.927165 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.927728 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.928130 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.928670 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.929016 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56f1dabd-4d11-4dc4-9961-efac4124e4a5-kube-api-access-j9tqq" (OuterVolumeSpecName: "kube-api-access-j9tqq") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "kube-api-access-j9tqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.929095 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.929686 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.929814 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:51 crc kubenswrapper[4800]: I1125 15:23:51.929822 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "56f1dabd-4d11-4dc4-9961-efac4124e4a5" (UID: "56f1dabd-4d11-4dc4-9961-efac4124e4a5"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023643 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-error\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023760 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b701ddb-a112-4067-8cfd-59d18f57b301-audit-dir\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023802 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023865 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-audit-policies\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023899 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0b701ddb-a112-4067-8cfd-59d18f57b301-audit-dir\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: 
\"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023910 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-service-ca\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023949 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023973 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-cliconfig\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.023992 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-serving-cert\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024011 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-login\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024028 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024049 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-router-certs\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024072 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mbdv\" (UniqueName: \"kubernetes.io/projected/0b701ddb-a112-4067-8cfd-59d18f57b301-kube-api-access-5mbdv\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: 
\"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024088 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024104 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-session\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024152 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024163 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024174 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024184 4800 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024194 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024204 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024212 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024221 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9tqq\" (UniqueName: \"kubernetes.io/projected/56f1dabd-4d11-4dc4-9961-efac4124e4a5-kube-api-access-j9tqq\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024231 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024239 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024249 4800 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56f1dabd-4d11-4dc4-9961-efac4124e4a5-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024257 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024266 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.024275 4800 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56f1dabd-4d11-4dc4-9961-efac4124e4a5-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.025255 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-service-ca\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.025591 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-cliconfig\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.025600 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-audit-policies\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.026036 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.027226 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-error\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.027875 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-router-certs\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.028142 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-session\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.029122 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.029489 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.029674 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-login\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.029898 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-system-serving-cert\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.030877 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0b701ddb-a112-4067-8cfd-59d18f57b301-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.039891 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mbdv\" (UniqueName: 
\"kubernetes.io/projected/0b701ddb-a112-4067-8cfd-59d18f57b301-kube-api-access-5mbdv\") pod \"oauth-openshift-b7d5b84cf-pjw76\" (UID: \"0b701ddb-a112-4067-8cfd-59d18f57b301\") " pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.128636 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.566410 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-b7d5b84cf-pjw76"] Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.754926 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" event={"ID":"0b701ddb-a112-4067-8cfd-59d18f57b301","Type":"ContainerStarted","Data":"840b56445946808454c1eb05b30fc83d7011cf4717796ddf5e8f01e7d677c5ab"} Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.754949 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-r4fbv" Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.785161 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r4fbv"] Nov 25 15:23:52 crc kubenswrapper[4800]: I1125 15:23:52.793150 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-r4fbv"] Nov 25 15:23:53 crc kubenswrapper[4800]: I1125 15:23:53.761254 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" event={"ID":"0b701ddb-a112-4067-8cfd-59d18f57b301","Type":"ContainerStarted","Data":"5e78ef48530ca9ba63f81fe6ac004f14faeb2db633bbda46842e2668902ea8aa"} Nov 25 15:23:53 crc kubenswrapper[4800]: I1125 15:23:53.761496 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:53 crc kubenswrapper[4800]: I1125 15:23:53.767322 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" Nov 25 15:23:53 crc kubenswrapper[4800]: I1125 15:23:53.794362 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56f1dabd-4d11-4dc4-9961-efac4124e4a5" path="/var/lib/kubelet/pods/56f1dabd-4d11-4dc4-9961-efac4124e4a5/volumes" Nov 25 15:23:53 crc kubenswrapper[4800]: I1125 15:23:53.810397 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" podStartSLOduration=27.810375315 podStartE2EDuration="27.810375315s" podCreationTimestamp="2025-11-25 15:23:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:23:53.782370156 +0000 UTC m=+394.836778648" watchObservedRunningTime="2025-11-25 15:23:53.810375315 +0000 UTC m=+394.864783797" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.365944 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4mb2k"] Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.367148 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4mb2k" podUID="16615745-a673-44e3-8cd7-980d59c421ad" containerName="registry-server" 
containerID="cri-o://ef0604839604a02c74f9c7e61e87ec37a9d2b967d655b06cca1f4c15236b37c7" gracePeriod=30 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.373541 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6td7n"] Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.374903 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6td7n" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="registry-server" containerID="cri-o://fe67d5fcee66a87c3ab1b2cdedbf1d6aeddd0fcdccede724fd53207caee118df" gracePeriod=30 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.382594 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5h48t"] Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.382854 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" containerID="cri-o://07c4fa3b7db534e9673f1d9350b7e832d3595607c5be7a5f9e3ad5a8586326e4" gracePeriod=30 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.393178 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgrss"] Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.393425 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xgrss" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="registry-server" containerID="cri-o://ae319b11dc4821c31f81de0afc767dc5b3810f3501512efe816d44d3bb85113f" gracePeriod=30 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.407527 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fhrvr"] Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.408471 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.413384 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hpkx7"] Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.413609 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hpkx7" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="registry-server" containerID="cri-o://8d28ee96a3006ec0a5807361e3dfc85f38ca7627ecad4762dac1e4ab865c9f19" gracePeriod=30 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.416975 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fhrvr"] Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.525213 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e6a5f505-c1f9-471a-b60a-97a39222f7bb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.525287 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e6a5f505-c1f9-471a-b60a-97a39222f7bb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.525351 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45hv2\" (UniqueName: \"kubernetes.io/projected/e6a5f505-c1f9-471a-b60a-97a39222f7bb-kube-api-access-45hv2\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.626327 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e6a5f505-c1f9-471a-b60a-97a39222f7bb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.626381 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e6a5f505-c1f9-471a-b60a-97a39222f7bb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.626434 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45hv2\" (UniqueName: \"kubernetes.io/projected/e6a5f505-c1f9-471a-b60a-97a39222f7bb-kube-api-access-45hv2\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.627534 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e6a5f505-c1f9-471a-b60a-97a39222f7bb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.634535 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e6a5f505-c1f9-471a-b60a-97a39222f7bb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.640500 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.640563 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.646422 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45hv2\" (UniqueName: \"kubernetes.io/projected/e6a5f505-c1f9-471a-b60a-97a39222f7bb-kube-api-access-45hv2\") pod \"marketplace-operator-79b997595-fhrvr\" (UID: \"e6a5f505-c1f9-471a-b60a-97a39222f7bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.733108 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.900078 4800 generic.go:334] "Generic (PLEG): container finished" podID="16615745-a673-44e3-8cd7-980d59c421ad" containerID="ef0604839604a02c74f9c7e61e87ec37a9d2b967d655b06cca1f4c15236b37c7" exitCode=0 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.900257 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mb2k" event={"ID":"16615745-a673-44e3-8cd7-980d59c421ad","Type":"ContainerDied","Data":"ef0604839604a02c74f9c7e61e87ec37a9d2b967d655b06cca1f4c15236b37c7"} Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.904243 4800 generic.go:334] "Generic (PLEG): container finished" podID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerID="8d28ee96a3006ec0a5807361e3dfc85f38ca7627ecad4762dac1e4ab865c9f19" exitCode=0 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.904295 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hpkx7" event={"ID":"1b030df0-0b5c-4854-bdaf-6b61067bed50","Type":"ContainerDied","Data":"8d28ee96a3006ec0a5807361e3dfc85f38ca7627ecad4762dac1e4ab865c9f19"} Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.907277 4800 generic.go:334] "Generic (PLEG): container finished" podID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerID="ae319b11dc4821c31f81de0afc767dc5b3810f3501512efe816d44d3bb85113f" exitCode=0 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.907317 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgrss" event={"ID":"40b1358b-2b78-4d92-8e03-baf11a6aecde","Type":"ContainerDied","Data":"ae319b11dc4821c31f81de0afc767dc5b3810f3501512efe816d44d3bb85113f"} Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.909877 4800 generic.go:334] "Generic (PLEG): container finished" podID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerID="fe67d5fcee66a87c3ab1b2cdedbf1d6aeddd0fcdccede724fd53207caee118df" exitCode=0 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.909926 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6td7n" event={"ID":"932f12b1-d6ce-4e42-b70f-6cd51c1082a1","Type":"ContainerDied","Data":"fe67d5fcee66a87c3ab1b2cdedbf1d6aeddd0fcdccede724fd53207caee118df"} Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.913372 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-5h48t_c50787f1-b3aa-49be-adc2-610beeeede6d/marketplace-operator/1.log" Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.913409 4800 generic.go:334] "Generic (PLEG): container finished" podID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerID="07c4fa3b7db534e9673f1d9350b7e832d3595607c5be7a5f9e3ad5a8586326e4" exitCode=0 Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.913429 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" event={"ID":"c50787f1-b3aa-49be-adc2-610beeeede6d","Type":"ContainerDied","Data":"07c4fa3b7db534e9673f1d9350b7e832d3595607c5be7a5f9e3ad5a8586326e4"} Nov 25 15:24:12 crc kubenswrapper[4800]: I1125 15:24:12.913456 4800 scope.go:117] "RemoveContainer" containerID="a30f6cdb64a696f783a529ff121cdeb5e41c8873df0cd1693be7ffa61d861173" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.170940 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-fhrvr"] Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.404044 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.540326 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-utilities\") pod \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.540459 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rt2v2\" (UniqueName: \"kubernetes.io/projected/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-kube-api-access-rt2v2\") pod \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.540504 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-catalog-content\") pod \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\" (UID: \"932f12b1-d6ce-4e42-b70f-6cd51c1082a1\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.545399 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-utilities" (OuterVolumeSpecName: "utilities") pod "932f12b1-d6ce-4e42-b70f-6cd51c1082a1" (UID: "932f12b1-d6ce-4e42-b70f-6cd51c1082a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.555170 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-kube-api-access-rt2v2" (OuterVolumeSpecName: "kube-api-access-rt2v2") pod "932f12b1-d6ce-4e42-b70f-6cd51c1082a1" (UID: "932f12b1-d6ce-4e42-b70f-6cd51c1082a1"). InnerVolumeSpecName "kube-api-access-rt2v2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.582725 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.616054 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "932f12b1-d6ce-4e42-b70f-6cd51c1082a1" (UID: "932f12b1-d6ce-4e42-b70f-6cd51c1082a1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.642878 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.642943 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rt2v2\" (UniqueName: \"kubernetes.io/projected/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-kube-api-access-rt2v2\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.642962 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932f12b1-d6ce-4e42-b70f-6cd51c1082a1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.718890 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.720390 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.727180 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.743579 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knkrs\" (UniqueName: \"kubernetes.io/projected/16615745-a673-44e3-8cd7-980d59c421ad-kube-api-access-knkrs\") pod \"16615745-a673-44e3-8cd7-980d59c421ad\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.743697 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-utilities\") pod \"16615745-a673-44e3-8cd7-980d59c421ad\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.743804 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-catalog-content\") pod \"16615745-a673-44e3-8cd7-980d59c421ad\" (UID: \"16615745-a673-44e3-8cd7-980d59c421ad\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.746078 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-utilities" (OuterVolumeSpecName: "utilities") pod "16615745-a673-44e3-8cd7-980d59c421ad" (UID: "16615745-a673-44e3-8cd7-980d59c421ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.748792 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16615745-a673-44e3-8cd7-980d59c421ad-kube-api-access-knkrs" (OuterVolumeSpecName: "kube-api-access-knkrs") pod "16615745-a673-44e3-8cd7-980d59c421ad" (UID: "16615745-a673-44e3-8cd7-980d59c421ad"). InnerVolumeSpecName "kube-api-access-knkrs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.806533 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16615745-a673-44e3-8cd7-980d59c421ad" (UID: "16615745-a673-44e3-8cd7-980d59c421ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.845568 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-utilities\") pod \"1b030df0-0b5c-4854-bdaf-6b61067bed50\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.845696 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-operator-metrics\") pod \"c50787f1-b3aa-49be-adc2-610beeeede6d\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.845760 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shs9d\" (UniqueName: \"kubernetes.io/projected/40b1358b-2b78-4d92-8e03-baf11a6aecde-kube-api-access-shs9d\") pod \"40b1358b-2b78-4d92-8e03-baf11a6aecde\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.845798 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-catalog-content\") pod \"40b1358b-2b78-4d92-8e03-baf11a6aecde\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.845891 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fm5b\" (UniqueName: \"kubernetes.io/projected/1b030df0-0b5c-4854-bdaf-6b61067bed50-kube-api-access-6fm5b\") pod \"1b030df0-0b5c-4854-bdaf-6b61067bed50\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.845920 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xh56\" (UniqueName: \"kubernetes.io/projected/c50787f1-b3aa-49be-adc2-610beeeede6d-kube-api-access-7xh56\") pod \"c50787f1-b3aa-49be-adc2-610beeeede6d\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.845995 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-trusted-ca\") pod \"c50787f1-b3aa-49be-adc2-610beeeede6d\" (UID: \"c50787f1-b3aa-49be-adc2-610beeeede6d\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.846082 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-utilities\") pod \"40b1358b-2b78-4d92-8e03-baf11a6aecde\" (UID: \"40b1358b-2b78-4d92-8e03-baf11a6aecde\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.846148 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-catalog-content\") pod \"1b030df0-0b5c-4854-bdaf-6b61067bed50\" (UID: \"1b030df0-0b5c-4854-bdaf-6b61067bed50\") " Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.846631 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.846677 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16615745-a673-44e3-8cd7-980d59c421ad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.846694 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knkrs\" (UniqueName: \"kubernetes.io/projected/16615745-a673-44e3-8cd7-980d59c421ad-kube-api-access-knkrs\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.850031 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-utilities" (OuterVolumeSpecName: "utilities") pod "1b030df0-0b5c-4854-bdaf-6b61067bed50" (UID: "1b030df0-0b5c-4854-bdaf-6b61067bed50"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.850038 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "c50787f1-b3aa-49be-adc2-610beeeede6d" (UID: "c50787f1-b3aa-49be-adc2-610beeeede6d"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.850415 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-utilities" (OuterVolumeSpecName: "utilities") pod "40b1358b-2b78-4d92-8e03-baf11a6aecde" (UID: "40b1358b-2b78-4d92-8e03-baf11a6aecde"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.854040 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "c50787f1-b3aa-49be-adc2-610beeeede6d" (UID: "c50787f1-b3aa-49be-adc2-610beeeede6d"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.855681 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c50787f1-b3aa-49be-adc2-610beeeede6d-kube-api-access-7xh56" (OuterVolumeSpecName: "kube-api-access-7xh56") pod "c50787f1-b3aa-49be-adc2-610beeeede6d" (UID: "c50787f1-b3aa-49be-adc2-610beeeede6d"). InnerVolumeSpecName "kube-api-access-7xh56". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.861749 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40b1358b-2b78-4d92-8e03-baf11a6aecde-kube-api-access-shs9d" (OuterVolumeSpecName: "kube-api-access-shs9d") pod "40b1358b-2b78-4d92-8e03-baf11a6aecde" (UID: "40b1358b-2b78-4d92-8e03-baf11a6aecde"). InnerVolumeSpecName "kube-api-access-shs9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.873056 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40b1358b-2b78-4d92-8e03-baf11a6aecde" (UID: "40b1358b-2b78-4d92-8e03-baf11a6aecde"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.877692 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b030df0-0b5c-4854-bdaf-6b61067bed50-kube-api-access-6fm5b" (OuterVolumeSpecName: "kube-api-access-6fm5b") pod "1b030df0-0b5c-4854-bdaf-6b61067bed50" (UID: "1b030df0-0b5c-4854-bdaf-6b61067bed50"). InnerVolumeSpecName "kube-api-access-6fm5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.921759 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hpkx7" event={"ID":"1b030df0-0b5c-4854-bdaf-6b61067bed50","Type":"ContainerDied","Data":"38ad52fcdbd15179f3010d24b720b69d5631f44e0edfaa4c1ce1b8002d892531"} Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.922391 4800 scope.go:117] "RemoveContainer" containerID="8d28ee96a3006ec0a5807361e3dfc85f38ca7627ecad4762dac1e4ab865c9f19" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.921859 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hpkx7" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.925360 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgrss" event={"ID":"40b1358b-2b78-4d92-8e03-baf11a6aecde","Type":"ContainerDied","Data":"4e9d27ea9e9afee4ed198c07cb6b1cbc58f0975e46e98fb120a015472ad78f3b"} Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.925463 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgrss" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.931752 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6td7n" event={"ID":"932f12b1-d6ce-4e42-b70f-6cd51c1082a1","Type":"ContainerDied","Data":"e3b99a6de1fddd3c43b7916988ec9c2a856ed51e6d643395cf2f02d2607dc6f7"} Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.932432 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6td7n" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.935067 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" event={"ID":"e6a5f505-c1f9-471a-b60a-97a39222f7bb","Type":"ContainerStarted","Data":"3bddec91fff9a79323411b82afaebf0263b4736bb23b67e78438ba65883626ab"} Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.935129 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" event={"ID":"e6a5f505-c1f9-471a-b60a-97a39222f7bb","Type":"ContainerStarted","Data":"19ee6c826b2ecd6592a9193b58c99130fe3639cf8fab9822ae5a23a82207664d"} Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.935824 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.938006 4800 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fhrvr container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.61:8080/healthz\": dial tcp 10.217.0.61:8080: connect: connection refused" start-of-body= Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.938096 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" podUID="e6a5f505-c1f9-471a-b60a-97a39222f7bb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.61:8080/healthz\": dial tcp 10.217.0.61:8080: connect: connection refused" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.938614 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.938710 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5h48t" event={"ID":"c50787f1-b3aa-49be-adc2-610beeeede6d","Type":"ContainerDied","Data":"99d6a6b9bfaa8329082bf54417f9833b10469a9c3f0d1c9c3066056c769a85a2"} Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.951659 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fm5b\" (UniqueName: \"kubernetes.io/projected/1b030df0-0b5c-4854-bdaf-6b61067bed50-kube-api-access-6fm5b\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.951885 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xh56\" (UniqueName: \"kubernetes.io/projected/c50787f1-b3aa-49be-adc2-610beeeede6d-kube-api-access-7xh56\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.952475 4800 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.952591 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.952684 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.952772 4800 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c50787f1-b3aa-49be-adc2-610beeeede6d-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.952931 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shs9d\" (UniqueName: \"kubernetes.io/projected/40b1358b-2b78-4d92-8e03-baf11a6aecde-kube-api-access-shs9d\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.953025 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40b1358b-2b78-4d92-8e03-baf11a6aecde-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.955912 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1b030df0-0b5c-4854-bdaf-6b61067bed50" (UID: "1b030df0-0b5c-4854-bdaf-6b61067bed50"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.962048 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6td7n"] Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.964241 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mb2k" event={"ID":"16615745-a673-44e3-8cd7-980d59c421ad","Type":"ContainerDied","Data":"dfb0638a9365c296fd7392a3e68a62b9417e7c20e0ddcd9d1ae38f07fe56a3f7"} Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.964539 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mb2k" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.965770 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6td7n"] Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.971088 4800 scope.go:117] "RemoveContainer" containerID="e5f20085dab30490c37f3c9abb2e2eb628176e82622436b360defef73c46b500" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.991214 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" podStartSLOduration=1.991180392 podStartE2EDuration="1.991180392s" podCreationTimestamp="2025-11-25 15:24:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:24:13.986733589 +0000 UTC m=+415.041142071" watchObservedRunningTime="2025-11-25 15:24:13.991180392 +0000 UTC m=+415.045588874" Nov 25 15:24:13 crc kubenswrapper[4800]: I1125 15:24:13.995082 4800 scope.go:117] "RemoveContainer" containerID="ef4f01e91b56da431f5e5133c2f2e8897641a3997a41291fdf6af85bb402a81c" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.012066 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgrss"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.019710 4800 scope.go:117] "RemoveContainer" containerID="ae319b11dc4821c31f81de0afc767dc5b3810f3501512efe816d44d3bb85113f" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.023025 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgrss"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.034402 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5h48t"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.039126 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5h48t"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.039975 4800 scope.go:117] "RemoveContainer" containerID="200db8e86b2e222c8d22631fd57c0b9ff654d16ab0c516b8462b253c6fc8c688" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.055017 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b030df0-0b5c-4854-bdaf-6b61067bed50-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.055075 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4mb2k"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.058714 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4mb2k"] 
Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.066685 4800 scope.go:117] "RemoveContainer" containerID="dcfd6ff47404939b3470729964dc05745ada3810d350c1f1ff1c97de7f2310bf" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.085221 4800 scope.go:117] "RemoveContainer" containerID="fe67d5fcee66a87c3ab1b2cdedbf1d6aeddd0fcdccede724fd53207caee118df" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.108690 4800 scope.go:117] "RemoveContainer" containerID="7a6196573c5b29c508175fee6f2e9a12bc9126a778a71a9f1d1b33666f3604d4" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.125607 4800 scope.go:117] "RemoveContainer" containerID="38c421282f3740f1eb94bb9ca6b085e1a5a7323b8b0dc35f5c7313adec8a201b" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.142804 4800 scope.go:117] "RemoveContainer" containerID="07c4fa3b7db534e9673f1d9350b7e832d3595607c5be7a5f9e3ad5a8586326e4" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.174778 4800 scope.go:117] "RemoveContainer" containerID="ef0604839604a02c74f9c7e61e87ec37a9d2b967d655b06cca1f4c15236b37c7" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.190473 4800 scope.go:117] "RemoveContainer" containerID="5b09b23bbbe226e33cc0bab9313406175dd8aa46bb79553c35896ba89cb78c21" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.207187 4800 scope.go:117] "RemoveContainer" containerID="1afdd79ba2f2e6809b379a06212dc5d1a0f61977d4c4e8c4360c41c9313e314a" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.256722 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hpkx7"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.262343 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hpkx7"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601115 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-w84s6"] Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601491 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601515 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601531 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601542 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601565 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601575 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601588 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="extract-content" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601599 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="extract-content" Nov 25 15:24:14 
crc kubenswrapper[4800]: E1125 15:24:14.601612 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601622 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601636 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601648 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601665 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16615745-a673-44e3-8cd7-980d59c421ad" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601676 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="16615745-a673-44e3-8cd7-980d59c421ad" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601696 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="extract-content" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601706 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="extract-content" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601717 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601729 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="extract-utilities" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601744 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601755 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601773 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16615745-a673-44e3-8cd7-980d59c421ad" containerName="extract-content" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601784 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="16615745-a673-44e3-8cd7-980d59c421ad" containerName="extract-content" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601803 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601813 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601828 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16615745-a673-44e3-8cd7-980d59c421ad" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601839 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="16615745-a673-44e3-8cd7-980d59c421ad" 
containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601892 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601903 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: E1125 15:24:14.601917 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="extract-content" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.601928 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="extract-content" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.602120 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.602140 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.602158 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.602180 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" containerName="marketplace-operator" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.602198 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="16615745-a673-44e3-8cd7-980d59c421ad" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.602211 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.602223 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" containerName="registry-server" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.603700 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.606721 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.610286 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w84s6"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.766044 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99fkp\" (UniqueName: \"kubernetes.io/projected/2e7bd884-1b29-4700-912b-d934384c1fec-kube-api-access-99fkp\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.766143 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7bd884-1b29-4700-912b-d934384c1fec-utilities\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.766188 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7bd884-1b29-4700-912b-d934384c1fec-catalog-content\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.781912 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7hdqn"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.783712 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.786436 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.802460 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7hdqn"] Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.867214 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7bd884-1b29-4700-912b-d934384c1fec-catalog-content\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.867284 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99fkp\" (UniqueName: \"kubernetes.io/projected/2e7bd884-1b29-4700-912b-d934384c1fec-kube-api-access-99fkp\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.867334 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-catalog-content\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.867394 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2422\" (UniqueName: \"kubernetes.io/projected/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-kube-api-access-j2422\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.867443 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7bd884-1b29-4700-912b-d934384c1fec-utilities\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.867473 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-utilities\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.868722 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7bd884-1b29-4700-912b-d934384c1fec-catalog-content\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.869120 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7bd884-1b29-4700-912b-d934384c1fec-utilities\") pod \"redhat-marketplace-w84s6\" (UID: 
\"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.897084 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99fkp\" (UniqueName: \"kubernetes.io/projected/2e7bd884-1b29-4700-912b-d934384c1fec-kube-api-access-99fkp\") pod \"redhat-marketplace-w84s6\" (UID: \"2e7bd884-1b29-4700-912b-d934384c1fec\") " pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.930717 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.968443 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-catalog-content\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.968544 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2422\" (UniqueName: \"kubernetes.io/projected/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-kube-api-access-j2422\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.968584 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-utilities\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.969140 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-utilities\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.969170 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-catalog-content\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.988576 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-fhrvr" Nov 25 15:24:14 crc kubenswrapper[4800]: I1125 15:24:14.989149 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2422\" (UniqueName: \"kubernetes.io/projected/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-kube-api-access-j2422\") pod \"certified-operators-7hdqn\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.102964 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.380430 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w84s6"] Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.536892 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7hdqn"] Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.794589 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16615745-a673-44e3-8cd7-980d59c421ad" path="/var/lib/kubelet/pods/16615745-a673-44e3-8cd7-980d59c421ad/volumes" Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.796615 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b030df0-0b5c-4854-bdaf-6b61067bed50" path="/var/lib/kubelet/pods/1b030df0-0b5c-4854-bdaf-6b61067bed50/volumes" Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.797221 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40b1358b-2b78-4d92-8e03-baf11a6aecde" path="/var/lib/kubelet/pods/40b1358b-2b78-4d92-8e03-baf11a6aecde/volumes" Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.798189 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="932f12b1-d6ce-4e42-b70f-6cd51c1082a1" path="/var/lib/kubelet/pods/932f12b1-d6ce-4e42-b70f-6cd51c1082a1/volumes" Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.798757 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c50787f1-b3aa-49be-adc2-610beeeede6d" path="/var/lib/kubelet/pods/c50787f1-b3aa-49be-adc2-610beeeede6d/volumes" Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.994896 4800 generic.go:334] "Generic (PLEG): container finished" podID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerID="cde8fad6a1ff14ca0c497a36bc26fd5aeab6fd1a393d737e10701765a7914313" exitCode=0 Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.994980 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hdqn" event={"ID":"3a67a8f3-a3e6-4d62-a901-ca2427e73f08","Type":"ContainerDied","Data":"cde8fad6a1ff14ca0c497a36bc26fd5aeab6fd1a393d737e10701765a7914313"} Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.995080 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hdqn" event={"ID":"3a67a8f3-a3e6-4d62-a901-ca2427e73f08","Type":"ContainerStarted","Data":"eb3faab092f5a846e0ec645c5d126c4d518461e19413e175394dd479cf978080"} Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.998986 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.999655 4800 generic.go:334] "Generic (PLEG): container finished" podID="2e7bd884-1b29-4700-912b-d934384c1fec" containerID="fd9ebdfafaa282fe2ca8dc32429ff5663fad3f21c35f5d698337d36b41e165fd" exitCode=0 Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.999824 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w84s6" event={"ID":"2e7bd884-1b29-4700-912b-d934384c1fec","Type":"ContainerDied","Data":"fd9ebdfafaa282fe2ca8dc32429ff5663fad3f21c35f5d698337d36b41e165fd"} Nov 25 15:24:15 crc kubenswrapper[4800]: I1125 15:24:15.999910 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w84s6" 
event={"ID":"2e7bd884-1b29-4700-912b-d934384c1fec","Type":"ContainerStarted","Data":"e0907b6d0b5b77d095e959bb4c60e2ad2812654a3ec039c3b0ecbff575ce5922"} Nov 25 15:24:16 crc kubenswrapper[4800]: I1125 15:24:16.988882 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-59bdq"] Nov 25 15:24:16 crc kubenswrapper[4800]: I1125 15:24:16.990150 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:16 crc kubenswrapper[4800]: I1125 15:24:16.993628 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.001724 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59bdq"] Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.100360 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xlw6\" (UniqueName: \"kubernetes.io/projected/ae35c255-4d64-4fdd-acd9-a796315307e4-kube-api-access-7xlw6\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.100443 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-catalog-content\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.100476 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-utilities\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.203494 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xlw6\" (UniqueName: \"kubernetes.io/projected/ae35c255-4d64-4fdd-acd9-a796315307e4-kube-api-access-7xlw6\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.203585 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-catalog-content\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.203618 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-utilities\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.204878 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-utilities\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.205218 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-catalog-content\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.219806 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-24prz"] Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.224188 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.227163 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.229127 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xlw6\" (UniqueName: \"kubernetes.io/projected/ae35c255-4d64-4fdd-acd9-a796315307e4-kube-api-access-7xlw6\") pod \"community-operators-59bdq\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.232106 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-24prz"] Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.305136 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-catalog-content\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.305226 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-utilities\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.305348 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppq5s\" (UniqueName: \"kubernetes.io/projected/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-kube-api-access-ppq5s\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.321902 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.408043 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-utilities\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.408253 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppq5s\" (UniqueName: \"kubernetes.io/projected/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-kube-api-access-ppq5s\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.408374 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-utilities\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.408536 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-catalog-content\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.409889 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-catalog-content\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.442220 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppq5s\" (UniqueName: \"kubernetes.io/projected/c98cb101-07e4-44a6-972a-7d6cb9cedfe9-kube-api-access-ppq5s\") pod \"redhat-operators-24prz\" (UID: \"c98cb101-07e4-44a6-972a-7d6cb9cedfe9\") " pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.600556 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:17 crc kubenswrapper[4800]: I1125 15:24:17.762009 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59bdq"] Nov 25 15:24:17 crc kubenswrapper[4800]: W1125 15:24:17.770049 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae35c255_4d64_4fdd_acd9_a796315307e4.slice/crio-e929384519ba3ff1a9d0d6f1adbd99b2fbef6ffb3af903d55a3bf8746f17dc47 WatchSource:0}: Error finding container e929384519ba3ff1a9d0d6f1adbd99b2fbef6ffb3af903d55a3bf8746f17dc47: Status 404 returned error can't find the container with id e929384519ba3ff1a9d0d6f1adbd99b2fbef6ffb3af903d55a3bf8746f17dc47 Nov 25 15:24:18 crc kubenswrapper[4800]: I1125 15:24:18.015638 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-24prz"] Nov 25 15:24:18 crc kubenswrapper[4800]: I1125 15:24:18.016323 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59bdq" event={"ID":"ae35c255-4d64-4fdd-acd9-a796315307e4","Type":"ContainerStarted","Data":"e929384519ba3ff1a9d0d6f1adbd99b2fbef6ffb3af903d55a3bf8746f17dc47"} Nov 25 15:24:18 crc kubenswrapper[4800]: I1125 15:24:18.019708 4800 generic.go:334] "Generic (PLEG): container finished" podID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerID="59206745335abbc14cea306fdf4e4de6834a435aefbc4ab3f0a4dd21fa896cc1" exitCode=0 Nov 25 15:24:18 crc kubenswrapper[4800]: I1125 15:24:18.019826 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hdqn" event={"ID":"3a67a8f3-a3e6-4d62-a901-ca2427e73f08","Type":"ContainerDied","Data":"59206745335abbc14cea306fdf4e4de6834a435aefbc4ab3f0a4dd21fa896cc1"} Nov 25 15:24:18 crc kubenswrapper[4800]: I1125 15:24:18.022694 4800 generic.go:334] "Generic (PLEG): container finished" podID="2e7bd884-1b29-4700-912b-d934384c1fec" containerID="397fa76343cb579d308efb9a13f50d5a238ce64e19168b7a78e1af1315582a33" exitCode=0 Nov 25 15:24:18 crc kubenswrapper[4800]: I1125 15:24:18.022738 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w84s6" event={"ID":"2e7bd884-1b29-4700-912b-d934384c1fec","Type":"ContainerDied","Data":"397fa76343cb579d308efb9a13f50d5a238ce64e19168b7a78e1af1315582a33"} Nov 25 15:24:18 crc kubenswrapper[4800]: W1125 15:24:18.138598 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc98cb101_07e4_44a6_972a_7d6cb9cedfe9.slice/crio-88e3e3a124fabce04379db559caaa09f87a266cc0a31bf3fa5c950dc1d4425f2 WatchSource:0}: Error finding container 88e3e3a124fabce04379db559caaa09f87a266cc0a31bf3fa5c950dc1d4425f2: Status 404 returned error can't find the container with id 88e3e3a124fabce04379db559caaa09f87a266cc0a31bf3fa5c950dc1d4425f2 Nov 25 15:24:19 crc kubenswrapper[4800]: I1125 15:24:19.032273 4800 generic.go:334] "Generic (PLEG): container finished" podID="c98cb101-07e4-44a6-972a-7d6cb9cedfe9" containerID="ed0b16ab94805096d626b0cb4871fe82905e33bff831ebeaeb779ee9f9055204" exitCode=0 Nov 25 15:24:19 crc kubenswrapper[4800]: I1125 15:24:19.033061 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24prz" event={"ID":"c98cb101-07e4-44a6-972a-7d6cb9cedfe9","Type":"ContainerDied","Data":"ed0b16ab94805096d626b0cb4871fe82905e33bff831ebeaeb779ee9f9055204"} Nov 25 
15:24:19 crc kubenswrapper[4800]: I1125 15:24:19.036101 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24prz" event={"ID":"c98cb101-07e4-44a6-972a-7d6cb9cedfe9","Type":"ContainerStarted","Data":"88e3e3a124fabce04379db559caaa09f87a266cc0a31bf3fa5c950dc1d4425f2"} Nov 25 15:24:19 crc kubenswrapper[4800]: I1125 15:24:19.039077 4800 generic.go:334] "Generic (PLEG): container finished" podID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerID="6b73391ebff03a1b71da971bbf828c149a906956fb2747d0d3ed4c43fb9b1a44" exitCode=0 Nov 25 15:24:19 crc kubenswrapper[4800]: I1125 15:24:19.039129 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59bdq" event={"ID":"ae35c255-4d64-4fdd-acd9-a796315307e4","Type":"ContainerDied","Data":"6b73391ebff03a1b71da971bbf828c149a906956fb2747d0d3ed4c43fb9b1a44"} Nov 25 15:24:20 crc kubenswrapper[4800]: I1125 15:24:20.050343 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w84s6" event={"ID":"2e7bd884-1b29-4700-912b-d934384c1fec","Type":"ContainerStarted","Data":"9b2759d490dc15dace25d584d51034087fc085e7c849f079e165911e113b2af9"} Nov 25 15:24:20 crc kubenswrapper[4800]: I1125 15:24:20.054097 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59bdq" event={"ID":"ae35c255-4d64-4fdd-acd9-a796315307e4","Type":"ContainerStarted","Data":"ab3e3917b48a8777c81e38e118ec5a2499ddde6d2dbc8893681269b01750a795"} Nov 25 15:24:20 crc kubenswrapper[4800]: I1125 15:24:20.059186 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hdqn" event={"ID":"3a67a8f3-a3e6-4d62-a901-ca2427e73f08","Type":"ContainerStarted","Data":"71444e1ec03530124e6eaaeeb5143a7c41d9a25ce415e8984ac6d334319d98de"} Nov 25 15:24:20 crc kubenswrapper[4800]: I1125 15:24:20.092484 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24prz" event={"ID":"c98cb101-07e4-44a6-972a-7d6cb9cedfe9","Type":"ContainerStarted","Data":"afee1c5591c97ec940d31771b5d9409c02bf5f0d65ab023e73e2c9da8e64bd81"} Nov 25 15:24:20 crc kubenswrapper[4800]: I1125 15:24:20.097186 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-w84s6" podStartSLOduration=2.9764432899999997 podStartE2EDuration="6.097106853s" podCreationTimestamp="2025-11-25 15:24:14 +0000 UTC" firstStartedPulling="2025-11-25 15:24:16.005262111 +0000 UTC m=+417.059670593" lastFinishedPulling="2025-11-25 15:24:19.125925674 +0000 UTC m=+420.180334156" observedRunningTime="2025-11-25 15:24:20.089780158 +0000 UTC m=+421.144188660" watchObservedRunningTime="2025-11-25 15:24:20.097106853 +0000 UTC m=+421.151515335" Nov 25 15:24:20 crc kubenswrapper[4800]: I1125 15:24:20.138956 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7hdqn" podStartSLOduration=2.9547531129999998 podStartE2EDuration="6.138925954s" podCreationTimestamp="2025-11-25 15:24:14 +0000 UTC" firstStartedPulling="2025-11-25 15:24:15.998629448 +0000 UTC m=+417.053037930" lastFinishedPulling="2025-11-25 15:24:19.182802289 +0000 UTC m=+420.237210771" observedRunningTime="2025-11-25 15:24:20.137822499 +0000 UTC m=+421.192230981" watchObservedRunningTime="2025-11-25 15:24:20.138925954 +0000 UTC m=+421.193334436" Nov 25 15:24:21 crc kubenswrapper[4800]: I1125 15:24:21.100956 4800 generic.go:334] "Generic 
(PLEG): container finished" podID="c98cb101-07e4-44a6-972a-7d6cb9cedfe9" containerID="afee1c5591c97ec940d31771b5d9409c02bf5f0d65ab023e73e2c9da8e64bd81" exitCode=0 Nov 25 15:24:21 crc kubenswrapper[4800]: I1125 15:24:21.101064 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24prz" event={"ID":"c98cb101-07e4-44a6-972a-7d6cb9cedfe9","Type":"ContainerDied","Data":"afee1c5591c97ec940d31771b5d9409c02bf5f0d65ab023e73e2c9da8e64bd81"} Nov 25 15:24:21 crc kubenswrapper[4800]: I1125 15:24:21.109079 4800 generic.go:334] "Generic (PLEG): container finished" podID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerID="ab3e3917b48a8777c81e38e118ec5a2499ddde6d2dbc8893681269b01750a795" exitCode=0 Nov 25 15:24:21 crc kubenswrapper[4800]: I1125 15:24:21.109230 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59bdq" event={"ID":"ae35c255-4d64-4fdd-acd9-a796315307e4","Type":"ContainerDied","Data":"ab3e3917b48a8777c81e38e118ec5a2499ddde6d2dbc8893681269b01750a795"} Nov 25 15:24:24 crc kubenswrapper[4800]: I1125 15:24:24.127323 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59bdq" event={"ID":"ae35c255-4d64-4fdd-acd9-a796315307e4","Type":"ContainerStarted","Data":"f21138b5baf63126d28dbd05a20135ccebaf5e9759c4e272e6180c78c864c8eb"} Nov 25 15:24:24 crc kubenswrapper[4800]: I1125 15:24:24.130349 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24prz" event={"ID":"c98cb101-07e4-44a6-972a-7d6cb9cedfe9","Type":"ContainerStarted","Data":"dd1e67ae58edb8bb90c0380e8671e2c1b32b8dbd4c5bec03fa5069f6246bdb11"} Nov 25 15:24:24 crc kubenswrapper[4800]: I1125 15:24:24.153793 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-59bdq" podStartSLOduration=5.562770807 podStartE2EDuration="8.153772316s" podCreationTimestamp="2025-11-25 15:24:16 +0000 UTC" firstStartedPulling="2025-11-25 15:24:19.117879476 +0000 UTC m=+420.172287958" lastFinishedPulling="2025-11-25 15:24:21.708880985 +0000 UTC m=+422.763289467" observedRunningTime="2025-11-25 15:24:24.152665971 +0000 UTC m=+425.207074453" watchObservedRunningTime="2025-11-25 15:24:24.153772316 +0000 UTC m=+425.208180798" Nov 25 15:24:24 crc kubenswrapper[4800]: I1125 15:24:24.170925 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-24prz" podStartSLOduration=4.764845 podStartE2EDuration="7.170901936s" podCreationTimestamp="2025-11-25 15:24:17 +0000 UTC" firstStartedPulling="2025-11-25 15:24:19.117890546 +0000 UTC m=+420.172299028" lastFinishedPulling="2025-11-25 15:24:21.523947482 +0000 UTC m=+422.578355964" observedRunningTime="2025-11-25 15:24:24.165480462 +0000 UTC m=+425.219888944" watchObservedRunningTime="2025-11-25 15:24:24.170901936 +0000 UTC m=+425.225310418" Nov 25 15:24:24 crc kubenswrapper[4800]: I1125 15:24:24.931958 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:24 crc kubenswrapper[4800]: I1125 15:24:24.934309 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:24 crc kubenswrapper[4800]: I1125 15:24:24.983073 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:25 crc 
kubenswrapper[4800]: I1125 15:24:25.103334 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:25 crc kubenswrapper[4800]: I1125 15:24:25.103401 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:25 crc kubenswrapper[4800]: I1125 15:24:25.141586 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:25 crc kubenswrapper[4800]: I1125 15:24:25.178741 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-w84s6" Nov 25 15:24:25 crc kubenswrapper[4800]: I1125 15:24:25.193740 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 15:24:27 crc kubenswrapper[4800]: I1125 15:24:27.322797 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:27 crc kubenswrapper[4800]: I1125 15:24:27.323295 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:27 crc kubenswrapper[4800]: I1125 15:24:27.374153 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:27 crc kubenswrapper[4800]: I1125 15:24:27.600702 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:27 crc kubenswrapper[4800]: I1125 15:24:27.600750 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:28 crc kubenswrapper[4800]: I1125 15:24:28.205792 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:24:28 crc kubenswrapper[4800]: I1125 15:24:28.647970 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-24prz" podUID="c98cb101-07e4-44a6-972a-7d6cb9cedfe9" containerName="registry-server" probeResult="failure" output=< Nov 25 15:24:28 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 15:24:28 crc kubenswrapper[4800]: > Nov 25 15:24:37 crc kubenswrapper[4800]: I1125 15:24:37.635363 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:37 crc kubenswrapper[4800]: I1125 15:24:37.672325 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-24prz" Nov 25 15:24:42 crc kubenswrapper[4800]: I1125 15:24:42.640018 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:24:42 crc kubenswrapper[4800]: I1125 15:24:42.640447 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:24:42 crc kubenswrapper[4800]: I1125 15:24:42.640512 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:24:42 crc kubenswrapper[4800]: I1125 15:24:42.641393 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"238219946c20c6136882342e4c1c6dd100485f8911e03584bba4787972e400d4"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:24:42 crc kubenswrapper[4800]: I1125 15:24:42.641502 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://238219946c20c6136882342e4c1c6dd100485f8911e03584bba4787972e400d4" gracePeriod=600 Nov 25 15:24:44 crc kubenswrapper[4800]: I1125 15:24:44.242657 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="238219946c20c6136882342e4c1c6dd100485f8911e03584bba4787972e400d4" exitCode=0 Nov 25 15:24:44 crc kubenswrapper[4800]: I1125 15:24:44.242730 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"238219946c20c6136882342e4c1c6dd100485f8911e03584bba4787972e400d4"} Nov 25 15:24:44 crc kubenswrapper[4800]: I1125 15:24:44.243077 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"6935127ce0963b69f67a19a19c36aed432bd71e361cc6d229fd1e9b0d2ffa635"} Nov 25 15:24:44 crc kubenswrapper[4800]: I1125 15:24:44.243113 4800 scope.go:117] "RemoveContainer" containerID="f8c32b9f610fbde3ef1fca8d156290eeb6c18cbc9430a0d45ee9a474bc7ea1da" Nov 25 15:25:21 crc kubenswrapper[4800]: I1125 15:25:21.262442 4800 scope.go:117] "RemoveContainer" containerID="5841d34da310770db993b72b381be6d55f0cda9de946e28d02ba16e49149e8b4" Nov 25 15:27:12 crc kubenswrapper[4800]: I1125 15:27:12.639734 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:27:12 crc kubenswrapper[4800]: I1125 15:27:12.640416 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:27:42 crc kubenswrapper[4800]: I1125 15:27:42.640642 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:27:42 crc kubenswrapper[4800]: I1125 15:27:42.641395 4800 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:28:12 crc kubenswrapper[4800]: I1125 15:28:12.639614 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:28:12 crc kubenswrapper[4800]: I1125 15:28:12.640240 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:28:12 crc kubenswrapper[4800]: I1125 15:28:12.640292 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:28:12 crc kubenswrapper[4800]: I1125 15:28:12.640956 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6935127ce0963b69f67a19a19c36aed432bd71e361cc6d229fd1e9b0d2ffa635"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:28:12 crc kubenswrapper[4800]: I1125 15:28:12.641049 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://6935127ce0963b69f67a19a19c36aed432bd71e361cc6d229fd1e9b0d2ffa635" gracePeriod=600 Nov 25 15:28:13 crc kubenswrapper[4800]: I1125 15:28:13.533312 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="6935127ce0963b69f67a19a19c36aed432bd71e361cc6d229fd1e9b0d2ffa635" exitCode=0 Nov 25 15:28:13 crc kubenswrapper[4800]: I1125 15:28:13.533920 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"6935127ce0963b69f67a19a19c36aed432bd71e361cc6d229fd1e9b0d2ffa635"} Nov 25 15:28:13 crc kubenswrapper[4800]: I1125 15:28:13.533974 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"4696a8bc43181471c595e8254afda59b2987f94a9cd2a837cdce4a6a707e3c00"} Nov 25 15:28:13 crc kubenswrapper[4800]: I1125 15:28:13.534010 4800 scope.go:117] "RemoveContainer" containerID="238219946c20c6136882342e4c1c6dd100485f8911e03584bba4787972e400d4" Nov 25 15:29:10 crc kubenswrapper[4800]: I1125 15:29:10.885460 4800 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.386093 4800 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-image-registry/image-registry-66df7c8f76-bjk4b"] Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.388320 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.400326 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-bjk4b"] Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.512015 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-registry-tls\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.512082 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rds2\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-kube-api-access-5rds2\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.512120 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-bound-sa-token\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.512150 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b577dfe6-6f24-4849-ac5e-232c25e854a7-registry-certificates\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.512190 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b577dfe6-6f24-4849-ac5e-232c25e854a7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.512231 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.512260 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b577dfe6-6f24-4849-ac5e-232c25e854a7-trusted-ca\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: 
I1125 15:29:16.512283 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b577dfe6-6f24-4849-ac5e-232c25e854a7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.538137 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.614233 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-registry-tls\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.614309 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rds2\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-kube-api-access-5rds2\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.614351 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-bound-sa-token\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.614391 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b577dfe6-6f24-4849-ac5e-232c25e854a7-registry-certificates\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.614437 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b577dfe6-6f24-4849-ac5e-232c25e854a7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.614467 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b577dfe6-6f24-4849-ac5e-232c25e854a7-trusted-ca\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.614484 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/b577dfe6-6f24-4849-ac5e-232c25e854a7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.615389 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b577dfe6-6f24-4849-ac5e-232c25e854a7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.617630 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b577dfe6-6f24-4849-ac5e-232c25e854a7-trusted-ca\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.617654 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b577dfe6-6f24-4849-ac5e-232c25e854a7-registry-certificates\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.623388 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b577dfe6-6f24-4849-ac5e-232c25e854a7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.623491 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-registry-tls\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.636930 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-bound-sa-token\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.639451 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rds2\" (UniqueName: \"kubernetes.io/projected/b577dfe6-6f24-4849-ac5e-232c25e854a7-kube-api-access-5rds2\") pod \"image-registry-66df7c8f76-bjk4b\" (UID: \"b577dfe6-6f24-4849-ac5e-232c25e854a7\") " pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.708884 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.925763 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-bjk4b"] Nov 25 15:29:16 crc kubenswrapper[4800]: I1125 15:29:16.954554 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" event={"ID":"b577dfe6-6f24-4849-ac5e-232c25e854a7","Type":"ContainerStarted","Data":"dc1106250c287c3d8ae14f0eca27335efe0a2571c5600bd08902a6126a660767"} Nov 25 15:29:17 crc kubenswrapper[4800]: I1125 15:29:17.961792 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" event={"ID":"b577dfe6-6f24-4849-ac5e-232c25e854a7","Type":"ContainerStarted","Data":"b34894ee891ab459c4aab95d6dda75d5416dc0497033cc01a70b737d08362452"} Nov 25 15:29:17 crc kubenswrapper[4800]: I1125 15:29:17.962398 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:17 crc kubenswrapper[4800]: I1125 15:29:17.988948 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" podStartSLOduration=1.9889249709999999 podStartE2EDuration="1.988924971s" podCreationTimestamp="2025-11-25 15:29:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:29:17.986782223 +0000 UTC m=+719.041190725" watchObservedRunningTime="2025-11-25 15:29:17.988924971 +0000 UTC m=+719.043333453" Nov 25 15:29:36 crc kubenswrapper[4800]: I1125 15:29:36.717716 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-bjk4b" Nov 25 15:29:36 crc kubenswrapper[4800]: I1125 15:29:36.795792 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7txz7"] Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.177038 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8"] Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.178405 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.180834 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.180865 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.184239 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8"] Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.328318 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80c53442-63bf-4ab5-815a-cf84b18e3464-secret-volume\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.328390 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80c53442-63bf-4ab5-815a-cf84b18e3464-config-volume\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.328435 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf7wj\" (UniqueName: \"kubernetes.io/projected/80c53442-63bf-4ab5-815a-cf84b18e3464-kube-api-access-sf7wj\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.429572 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80c53442-63bf-4ab5-815a-cf84b18e3464-secret-volume\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.430343 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80c53442-63bf-4ab5-815a-cf84b18e3464-config-volume\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.430438 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf7wj\" (UniqueName: \"kubernetes.io/projected/80c53442-63bf-4ab5-815a-cf84b18e3464-kube-api-access-sf7wj\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.432131 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80c53442-63bf-4ab5-815a-cf84b18e3464-config-volume\") pod 
\"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.435808 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80c53442-63bf-4ab5-815a-cf84b18e3464-secret-volume\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.446361 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf7wj\" (UniqueName: \"kubernetes.io/projected/80c53442-63bf-4ab5-815a-cf84b18e3464-kube-api-access-sf7wj\") pod \"collect-profiles-29401410-nwkd8\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.501610 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:00 crc kubenswrapper[4800]: I1125 15:30:00.689106 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8"] Nov 25 15:30:01 crc kubenswrapper[4800]: I1125 15:30:01.201602 4800 generic.go:334] "Generic (PLEG): container finished" podID="80c53442-63bf-4ab5-815a-cf84b18e3464" containerID="6c9f10cba91f358845ee3cf2ed9103a68565cf8220f2e0b13efce486fdf0e134" exitCode=0 Nov 25 15:30:01 crc kubenswrapper[4800]: I1125 15:30:01.201651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" event={"ID":"80c53442-63bf-4ab5-815a-cf84b18e3464","Type":"ContainerDied","Data":"6c9f10cba91f358845ee3cf2ed9103a68565cf8220f2e0b13efce486fdf0e134"} Nov 25 15:30:01 crc kubenswrapper[4800]: I1125 15:30:01.201930 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" event={"ID":"80c53442-63bf-4ab5-815a-cf84b18e3464","Type":"ContainerStarted","Data":"03d8222b0d51758e270e32ed34b2ce9e4e06faaad166b58560b9613cc334b779"} Nov 25 15:30:01 crc kubenswrapper[4800]: I1125 15:30:01.854287 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" podUID="cf691fcb-4403-45a8-80e0-58a2c50f5481" containerName="registry" containerID="cri-o://a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099" gracePeriod=30 Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.168058 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.209745 4800 generic.go:334] "Generic (PLEG): container finished" podID="cf691fcb-4403-45a8-80e0-58a2c50f5481" containerID="a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099" exitCode=0 Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.209796 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" event={"ID":"cf691fcb-4403-45a8-80e0-58a2c50f5481","Type":"ContainerDied","Data":"a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099"} Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.209860 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.209889 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7txz7" event={"ID":"cf691fcb-4403-45a8-80e0-58a2c50f5481","Type":"ContainerDied","Data":"eff66c7ebbb71bc22ced9b54d377bab1a948490fea144cf402feb69a3eebae3b"} Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.209908 4800 scope.go:117] "RemoveContainer" containerID="a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.231027 4800 scope.go:117] "RemoveContainer" containerID="a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099" Nov 25 15:30:02 crc kubenswrapper[4800]: E1125 15:30:02.231767 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099\": container with ID starting with a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099 not found: ID does not exist" containerID="a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.231803 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099"} err="failed to get container status \"a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099\": rpc error: code = NotFound desc = could not find container \"a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099\": container with ID starting with a1b61c295569f20b1ffeb788507b62dc9affc59cc0aa03fd8d314ec71d641099 not found: ID does not exist" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.260933 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf691fcb-4403-45a8-80e0-58a2c50f5481-ca-trust-extracted\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.261005 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf691fcb-4403-45a8-80e0-58a2c50f5481-installation-pull-secrets\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.261085 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-tls\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.261295 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-trusted-ca\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.261337 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-bound-sa-token\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.261387 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-certificates\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.261557 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.261618 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6bcz\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-kube-api-access-x6bcz\") pod \"cf691fcb-4403-45a8-80e0-58a2c50f5481\" (UID: \"cf691fcb-4403-45a8-80e0-58a2c50f5481\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.262805 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.262819 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.263172 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.263196 4800 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.268863 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.275530 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.277487 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.279871 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf691fcb-4403-45a8-80e0-58a2c50f5481-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.285555 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-kube-api-access-x6bcz" (OuterVolumeSpecName: "kube-api-access-x6bcz") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "kube-api-access-x6bcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.286565 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf691fcb-4403-45a8-80e0-58a2c50f5481-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "cf691fcb-4403-45a8-80e0-58a2c50f5481" (UID: "cf691fcb-4403-45a8-80e0-58a2c50f5481"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.366706 4800 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.366755 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6bcz\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-kube-api-access-x6bcz\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.366770 4800 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf691fcb-4403-45a8-80e0-58a2c50f5481-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.366783 4800 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf691fcb-4403-45a8-80e0-58a2c50f5481-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.366797 4800 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf691fcb-4403-45a8-80e0-58a2c50f5481-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.382226 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.543495 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7txz7"] Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.551164 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7txz7"] Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.570337 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80c53442-63bf-4ab5-815a-cf84b18e3464-secret-volume\") pod \"80c53442-63bf-4ab5-815a-cf84b18e3464\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.570428 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80c53442-63bf-4ab5-815a-cf84b18e3464-config-volume\") pod \"80c53442-63bf-4ab5-815a-cf84b18e3464\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.570623 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sf7wj\" (UniqueName: \"kubernetes.io/projected/80c53442-63bf-4ab5-815a-cf84b18e3464-kube-api-access-sf7wj\") pod \"80c53442-63bf-4ab5-815a-cf84b18e3464\" (UID: \"80c53442-63bf-4ab5-815a-cf84b18e3464\") " Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.571287 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80c53442-63bf-4ab5-815a-cf84b18e3464-config-volume" (OuterVolumeSpecName: "config-volume") pod "80c53442-63bf-4ab5-815a-cf84b18e3464" (UID: "80c53442-63bf-4ab5-815a-cf84b18e3464"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.574177 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80c53442-63bf-4ab5-815a-cf84b18e3464-kube-api-access-sf7wj" (OuterVolumeSpecName: "kube-api-access-sf7wj") pod "80c53442-63bf-4ab5-815a-cf84b18e3464" (UID: "80c53442-63bf-4ab5-815a-cf84b18e3464"). InnerVolumeSpecName "kube-api-access-sf7wj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.574957 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80c53442-63bf-4ab5-815a-cf84b18e3464-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "80c53442-63bf-4ab5-815a-cf84b18e3464" (UID: "80c53442-63bf-4ab5-815a-cf84b18e3464"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.671830 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sf7wj\" (UniqueName: \"kubernetes.io/projected/80c53442-63bf-4ab5-815a-cf84b18e3464-kube-api-access-sf7wj\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.671882 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80c53442-63bf-4ab5-815a-cf84b18e3464-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:02 crc kubenswrapper[4800]: I1125 15:30:02.671894 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80c53442-63bf-4ab5-815a-cf84b18e3464-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:03 crc kubenswrapper[4800]: I1125 15:30:03.216600 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" Nov 25 15:30:03 crc kubenswrapper[4800]: I1125 15:30:03.216584 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8" event={"ID":"80c53442-63bf-4ab5-815a-cf84b18e3464","Type":"ContainerDied","Data":"03d8222b0d51758e270e32ed34b2ce9e4e06faaad166b58560b9613cc334b779"} Nov 25 15:30:03 crc kubenswrapper[4800]: I1125 15:30:03.216798 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03d8222b0d51758e270e32ed34b2ce9e4e06faaad166b58560b9613cc334b779" Nov 25 15:30:03 crc kubenswrapper[4800]: I1125 15:30:03.800477 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf691fcb-4403-45a8-80e0-58a2c50f5481" path="/var/lib/kubelet/pods/cf691fcb-4403-45a8-80e0-58a2c50f5481/volumes" Nov 25 15:30:12 crc kubenswrapper[4800]: I1125 15:30:12.640672 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:30:12 crc kubenswrapper[4800]: I1125 15:30:12.641696 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.857239 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-24s7f"] Nov 25 15:30:27 crc kubenswrapper[4800]: E1125 15:30:27.858743 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80c53442-63bf-4ab5-815a-cf84b18e3464" containerName="collect-profiles" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.858765 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80c53442-63bf-4ab5-815a-cf84b18e3464" containerName="collect-profiles" Nov 25 15:30:27 crc kubenswrapper[4800]: E1125 15:30:27.858783 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf691fcb-4403-45a8-80e0-58a2c50f5481" containerName="registry" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.858791 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf691fcb-4403-45a8-80e0-58a2c50f5481" containerName="registry" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.858997 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80c53442-63bf-4ab5-815a-cf84b18e3464" containerName="collect-profiles" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.859015 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf691fcb-4403-45a8-80e0-58a2c50f5481" containerName="registry" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.859959 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.861660 4800 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-kcns4" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.864028 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-24s7f"] Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.869206 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.869701 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.879936 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-mhrc8"] Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.880566 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-mhrc8" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.882485 4800 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-jvxpp" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.898112 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-z579p"] Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.899617 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.903109 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-mhrc8"] Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.904374 4800 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-hw4ck" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.914836 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-z579p"] Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.950573 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdwmd\" (UniqueName: \"kubernetes.io/projected/ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb-kube-api-access-hdwmd\") pod \"cert-manager-5b446d88c5-mhrc8\" (UID: \"ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb\") " pod="cert-manager/cert-manager-5b446d88c5-mhrc8" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.950645 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sln52\" (UniqueName: \"kubernetes.io/projected/c42ac87c-4158-4d13-99f5-634729b126dd-kube-api-access-sln52\") pod \"cert-manager-webhook-5655c58dd6-z579p\" (UID: \"c42ac87c-4158-4d13-99f5-634729b126dd\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" Nov 25 15:30:27 crc kubenswrapper[4800]: I1125 15:30:27.950868 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rzdv\" (UniqueName: \"kubernetes.io/projected/68f1e02e-3adc-4214-bcce-8d3fea0e02ef-kube-api-access-7rzdv\") pod \"cert-manager-cainjector-7f985d654d-24s7f\" (UID: \"68f1e02e-3adc-4214-bcce-8d3fea0e02ef\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" Nov 25 15:30:28 
crc kubenswrapper[4800]: I1125 15:30:28.052020 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdwmd\" (UniqueName: \"kubernetes.io/projected/ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb-kube-api-access-hdwmd\") pod \"cert-manager-5b446d88c5-mhrc8\" (UID: \"ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb\") " pod="cert-manager/cert-manager-5b446d88c5-mhrc8" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.052146 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sln52\" (UniqueName: \"kubernetes.io/projected/c42ac87c-4158-4d13-99f5-634729b126dd-kube-api-access-sln52\") pod \"cert-manager-webhook-5655c58dd6-z579p\" (UID: \"c42ac87c-4158-4d13-99f5-634729b126dd\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.052208 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rzdv\" (UniqueName: \"kubernetes.io/projected/68f1e02e-3adc-4214-bcce-8d3fea0e02ef-kube-api-access-7rzdv\") pod \"cert-manager-cainjector-7f985d654d-24s7f\" (UID: \"68f1e02e-3adc-4214-bcce-8d3fea0e02ef\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.075579 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sln52\" (UniqueName: \"kubernetes.io/projected/c42ac87c-4158-4d13-99f5-634729b126dd-kube-api-access-sln52\") pod \"cert-manager-webhook-5655c58dd6-z579p\" (UID: \"c42ac87c-4158-4d13-99f5-634729b126dd\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.075579 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rzdv\" (UniqueName: \"kubernetes.io/projected/68f1e02e-3adc-4214-bcce-8d3fea0e02ef-kube-api-access-7rzdv\") pod \"cert-manager-cainjector-7f985d654d-24s7f\" (UID: \"68f1e02e-3adc-4214-bcce-8d3fea0e02ef\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.078107 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdwmd\" (UniqueName: \"kubernetes.io/projected/ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb-kube-api-access-hdwmd\") pod \"cert-manager-5b446d88c5-mhrc8\" (UID: \"ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb\") " pod="cert-manager/cert-manager-5b446d88c5-mhrc8" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.180507 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.198635 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-mhrc8" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.217257 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.467868 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-24s7f"] Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.473245 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.719963 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-z579p"] Nov 25 15:30:28 crc kubenswrapper[4800]: W1125 15:30:28.724848 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc42ac87c_4158_4d13_99f5_634729b126dd.slice/crio-a56256bc23aea77e634880c934b749ab29add4ddb728b6b7283b361c12391cc0 WatchSource:0}: Error finding container a56256bc23aea77e634880c934b749ab29add4ddb728b6b7283b361c12391cc0: Status 404 returned error can't find the container with id a56256bc23aea77e634880c934b749ab29add4ddb728b6b7283b361c12391cc0 Nov 25 15:30:28 crc kubenswrapper[4800]: I1125 15:30:28.725801 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-mhrc8"] Nov 25 15:30:28 crc kubenswrapper[4800]: W1125 15:30:28.729182 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee4fea7b_32ed_4315_8b7b_9bafd32a6ebb.slice/crio-9f290bd0418b270e8a13805376e8111042512781245ad863630852b1ad6f5c08 WatchSource:0}: Error finding container 9f290bd0418b270e8a13805376e8111042512781245ad863630852b1ad6f5c08: Status 404 returned error can't find the container with id 9f290bd0418b270e8a13805376e8111042512781245ad863630852b1ad6f5c08 Nov 25 15:30:29 crc kubenswrapper[4800]: I1125 15:30:29.381203 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" event={"ID":"68f1e02e-3adc-4214-bcce-8d3fea0e02ef","Type":"ContainerStarted","Data":"cc40622404dcd88e8176d491336be3ed0b061dd6ec83438a559d53e3df340a36"} Nov 25 15:30:29 crc kubenswrapper[4800]: I1125 15:30:29.382546 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-mhrc8" event={"ID":"ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb","Type":"ContainerStarted","Data":"9f290bd0418b270e8a13805376e8111042512781245ad863630852b1ad6f5c08"} Nov 25 15:30:29 crc kubenswrapper[4800]: I1125 15:30:29.383684 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" event={"ID":"c42ac87c-4158-4d13-99f5-634729b126dd","Type":"ContainerStarted","Data":"a56256bc23aea77e634880c934b749ab29add4ddb728b6b7283b361c12391cc0"} Nov 25 15:30:35 crc kubenswrapper[4800]: I1125 15:30:35.423615 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-mhrc8" event={"ID":"ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb","Type":"ContainerStarted","Data":"a5adcb3cc911fbaf802293d44e50b00cdf4f65b47b761678a9ab12cc296e2979"} Nov 25 15:30:35 crc kubenswrapper[4800]: I1125 15:30:35.432821 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" event={"ID":"c42ac87c-4158-4d13-99f5-634729b126dd","Type":"ContainerStarted","Data":"008575f0e375fdd75eeb7d83f0767a22e47a265d3824c6988c99b2b3263aea9f"} Nov 25 15:30:35 crc kubenswrapper[4800]: I1125 15:30:35.433297 4800 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" Nov 25 15:30:35 crc kubenswrapper[4800]: I1125 15:30:35.436878 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" event={"ID":"68f1e02e-3adc-4214-bcce-8d3fea0e02ef","Type":"ContainerStarted","Data":"848799462a2fa54c96e9b819775c79d4712c5d15dea3b1c6ff75097f2338d4ca"} Nov 25 15:30:35 crc kubenswrapper[4800]: I1125 15:30:35.459785 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-mhrc8" podStartSLOduration=2.449645763 podStartE2EDuration="8.459755936s" podCreationTimestamp="2025-11-25 15:30:27 +0000 UTC" firstStartedPulling="2025-11-25 15:30:28.730573198 +0000 UTC m=+789.784981680" lastFinishedPulling="2025-11-25 15:30:34.740683371 +0000 UTC m=+795.795091853" observedRunningTime="2025-11-25 15:30:35.455477887 +0000 UTC m=+796.509886439" watchObservedRunningTime="2025-11-25 15:30:35.459755936 +0000 UTC m=+796.514164458" Nov 25 15:30:35 crc kubenswrapper[4800]: I1125 15:30:35.474503 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-24s7f" podStartSLOduration=2.272722306 podStartE2EDuration="8.474477527s" podCreationTimestamp="2025-11-25 15:30:27 +0000 UTC" firstStartedPulling="2025-11-25 15:30:28.472903989 +0000 UTC m=+789.527312471" lastFinishedPulling="2025-11-25 15:30:34.67465921 +0000 UTC m=+795.729067692" observedRunningTime="2025-11-25 15:30:35.472307388 +0000 UTC m=+796.526715920" watchObservedRunningTime="2025-11-25 15:30:35.474477527 +0000 UTC m=+796.528886049" Nov 25 15:30:35 crc kubenswrapper[4800]: I1125 15:30:35.504608 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p" podStartSLOduration=2.558741043 podStartE2EDuration="8.504589076s" podCreationTimestamp="2025-11-25 15:30:27 +0000 UTC" firstStartedPulling="2025-11-25 15:30:28.729271777 +0000 UTC m=+789.783680259" lastFinishedPulling="2025-11-25 15:30:34.67511981 +0000 UTC m=+795.729528292" observedRunningTime="2025-11-25 15:30:35.501410413 +0000 UTC m=+796.555818905" watchObservedRunningTime="2025-11-25 15:30:35.504589076 +0000 UTC m=+796.558997568" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.408229 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvthw"] Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.408996 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-controller" containerID="cri-o://fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.409049 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.409096 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="northd" 
containerID="cri-o://28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.409163 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-node" containerID="cri-o://1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.409051 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="nbdb" containerID="cri-o://f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.409242 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-acl-logging" containerID="cri-o://9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.409338 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="sbdb" containerID="cri-o://959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.479895 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" containerID="cri-o://c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6" gracePeriod=30 Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.773171 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/2.log" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.775593 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovn-acl-logging/0.log" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.776023 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovn-controller/0.log" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.776425 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.831746 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tsgld"] Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.831995 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832010 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832021 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="northd" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832030 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="northd" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832042 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832050 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832061 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832070 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832083 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832092 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832107 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kubecfg-setup" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832115 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kubecfg-setup" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832127 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-node" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832137 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-node" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832148 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832156 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832167 4800 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="nbdb" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832175 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="nbdb" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832187 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="sbdb" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832195 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="sbdb" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832209 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-acl-logging" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832217 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-acl-logging" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832335 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="nbdb" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832349 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832358 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832370 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-acl-logging" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832380 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="sbdb" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832393 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovn-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832401 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832414 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832425 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="northd" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832436 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="kube-rbac-proxy-node" Nov 25 15:30:38 crc kubenswrapper[4800]: E1125 15:30:38.832549 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832559 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.832676 4800 
memory_manager.go:354] "RemoveStaleState removing state" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerName="ovnkube-controller" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.834654 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897413 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-bin\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897449 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-netns\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897471 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-ovn-kubernetes\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897492 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-node-log\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897532 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-slash\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897533 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-node-log" (OuterVolumeSpecName: "node-log") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897550 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897565 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897577 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897578 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-slash" (OuterVolumeSpecName: "host-slash") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897736 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-ovn\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897771 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897919 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-kubelet\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.897990 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898118 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-systemd-units\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898144 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-openvswitch\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898177 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). 
InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898207 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898297 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-systemd\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898344 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jbt5\" (UniqueName: \"kubernetes.io/projected/80e4f44d-4647-4e15-a29f-2672fc065d82-kube-api-access-4jbt5\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898364 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-var-lib-cni-networks-ovn-kubernetes\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898384 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80e4f44d-4647-4e15-a29f-2672fc065d82-ovn-node-metrics-cert\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898404 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-etc-openvswitch\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898421 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-env-overrides\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898448 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-config\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898473 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898485 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-var-lib-openvswitch\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898503 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898529 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-log-socket\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898551 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-netd\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898565 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-script-lib\") pod \"80e4f44d-4647-4e15-a29f-2672fc065d82\" (UID: \"80e4f44d-4647-4e15-a29f-2672fc065d82\") " Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898600 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898605 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-log-socket" (OuterVolumeSpecName: "log-socket") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898623 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898706 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-cni-bin\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898739 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-systemd\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898754 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-systemd-units\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898770 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-node-log\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898788 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-run-netns\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898882 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-var-lib-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898926 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovn-node-metrics-cert\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898943 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898951 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-log-socket\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.898959 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899008 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899008 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899060 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-ovn\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899079 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-slash\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899108 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-etc-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899131 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw5kq\" (UniqueName: \"kubernetes.io/projected/9dac80ee-9d7a-4194-9834-f4b1e2915017-kube-api-access-zw5kq\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899194 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899211 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-cni-netd\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899231 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-kubelet\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899248 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovnkube-script-lib\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899269 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovnkube-config\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899330 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-env-overrides\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899372 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-run-ovn-kubernetes\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899450 4800 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899461 4800 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899471 4800 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:38 crc kubenswrapper[4800]: 
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899482 4800 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-log-socket\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899490 4800 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-netd\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899501 4800 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80e4f44d-4647-4e15-a29f-2672fc065d82-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899509 4800 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-cni-bin\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899517 4800 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899525 4800 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899536 4800 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-node-log\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899544 4800 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-slash\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899552 4800 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899560 4800 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899568 4800 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-systemd-units\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899577 4800 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899588 4800 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.899598 4800 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.903572 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80e4f44d-4647-4e15-a29f-2672fc065d82-kube-api-access-4jbt5" (OuterVolumeSpecName: "kube-api-access-4jbt5") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "kube-api-access-4jbt5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.904179 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80e4f44d-4647-4e15-a29f-2672fc065d82-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:30:38 crc kubenswrapper[4800]: I1125 15:30:38.911531 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "80e4f44d-4647-4e15-a29f-2672fc065d82" (UID: "80e4f44d-4647-4e15-a29f-2672fc065d82"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000251 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-etc-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000368 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw5kq\" (UniqueName: \"kubernetes.io/projected/9dac80ee-9d7a-4194-9834-f4b1e2915017-kube-api-access-zw5kq\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000451 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000529 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000467 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-etc-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000622 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-cni-netd\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000737 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-cni-netd\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000918 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-kubelet\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.000970 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-kubelet\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.001977 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovnkube-script-lib\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002205 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovnkube-config\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002251 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-run-ovn-kubernetes\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002283 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-env-overrides\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002363 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-cni-bin\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002436 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName:
\"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-systemd\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002463 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-run-ovn-kubernetes\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002478 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-systemd-units\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002565 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-systemd-units\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002597 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-cni-bin\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002621 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-node-log\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002629 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-systemd\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002568 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-node-log\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002826 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-run-netns\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002918 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-var-lib-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: 
\"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002991 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-var-lib-openvswitch\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003000 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovn-node-metrics-cert\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003080 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-log-socket\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003103 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003163 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-slash\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003180 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-ovn\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.002957 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-run-netns\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003282 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-log-socket\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003543 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovnkube-config\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" 
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003569 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-env-overrides\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003770 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003803 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-host-slash\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003809 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac80ee-9d7a-4194-9834-f4b1e2915017-run-ovn\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003858 4800 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80e4f44d-4647-4e15-a29f-2672fc065d82-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003871 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jbt5\" (UniqueName: \"kubernetes.io/projected/80e4f44d-4647-4e15-a29f-2672fc065d82-kube-api-access-4jbt5\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003881 4800 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80e4f44d-4647-4e15-a29f-2672fc065d82-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.003908 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovnkube-script-lib\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.014828 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9dac80ee-9d7a-4194-9834-f4b1e2915017-ovn-node-metrics-cert\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.023165 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw5kq\" (UniqueName: \"kubernetes.io/projected/9dac80ee-9d7a-4194-9834-f4b1e2915017-kube-api-access-zw5kq\") pod \"ovnkube-node-tsgld\" (UID: \"9dac80ee-9d7a-4194-9834-f4b1e2915017\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc 
kubenswrapper[4800]: I1125 15:30:39.156066 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" Nov 25 15:30:39 crc kubenswrapper[4800]: W1125 15:30:39.185642 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9dac80ee_9d7a_4194_9834_f4b1e2915017.slice/crio-764992c1b85c69c46cd3142b0399e0d4009eddeaf7a0b1dd061d4177da74fb96 WatchSource:0}: Error finding container 764992c1b85c69c46cd3142b0399e0d4009eddeaf7a0b1dd061d4177da74fb96: Status 404 returned error can't find the container with id 764992c1b85c69c46cd3142b0399e0d4009eddeaf7a0b1dd061d4177da74fb96 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.486236 4800 generic.go:334] "Generic (PLEG): container finished" podID="9dac80ee-9d7a-4194-9834-f4b1e2915017" containerID="3f25fc8241e1b737ba1d000bd19bd0169a92b8dfedaae4c7701361b8eced2b30" exitCode=0 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.486331 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerDied","Data":"3f25fc8241e1b737ba1d000bd19bd0169a92b8dfedaae4c7701361b8eced2b30"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.486407 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"764992c1b85c69c46cd3142b0399e0d4009eddeaf7a0b1dd061d4177da74fb96"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.491532 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovnkube-controller/2.log" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.496831 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovn-acl-logging/0.log" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.497824 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvthw_80e4f44d-4647-4e15-a29f-2672fc065d82/ovn-controller/0.log" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498651 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6" exitCode=0 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498688 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451" exitCode=0 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498704 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae" exitCode=0 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498718 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9" exitCode=0 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498731 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" 
containerID="ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8" exitCode=0 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498744 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e" exitCode=0 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498757 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f" exitCode=143 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498779 4800 generic.go:334] "Generic (PLEG): container finished" podID="80e4f44d-4647-4e15-a29f-2672fc065d82" containerID="fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac" exitCode=143 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498807 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498746 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.498978 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499004 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499027 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499046 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499067 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499086 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499103 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499115 4800 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499118 4800 scope.go:117] "RemoveContainer" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499126 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499326 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499348 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499365 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499382 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499396 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499423 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499459 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499478 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499493 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499508 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499523 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499537 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499553 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499568 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499583 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499598 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499621 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499647 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499668 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499683 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499698 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499712 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499727 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499742 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499756 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499771 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499785 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499806 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvthw" event={"ID":"80e4f44d-4647-4e15-a29f-2672fc065d82","Type":"ContainerDied","Data":"3f3331c454b12866d65a3ec569560dd1a858f3a483355ab3d0fc8919e228f493"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499831 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499883 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499904 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499920 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499935 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499949 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499966 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499981 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.499995 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.500011 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.502541 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-nzxgf_0321f61a-9e40-47a2-b19f-a859fd6b890a/kube-multus/1.log" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.503584 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-nzxgf_0321f61a-9e40-47a2-b19f-a859fd6b890a/kube-multus/0.log" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.503678 4800 generic.go:334] "Generic (PLEG): container finished" podID="0321f61a-9e40-47a2-b19f-a859fd6b890a" containerID="f223a9adeb4f3035c5439f79a2c0e65bd4024420e203214517db732b46e41290" exitCode=2 Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.503738 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-nzxgf" event={"ID":"0321f61a-9e40-47a2-b19f-a859fd6b890a","Type":"ContainerDied","Data":"f223a9adeb4f3035c5439f79a2c0e65bd4024420e203214517db732b46e41290"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.503798 4800 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5"} Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.504604 4800 scope.go:117] "RemoveContainer" containerID="f223a9adeb4f3035c5439f79a2c0e65bd4024420e203214517db732b46e41290" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.544911 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.570715 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvthw"] Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.576289 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvthw"] Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.625050 4800 scope.go:117] "RemoveContainer" containerID="959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.650191 4800 scope.go:117] "RemoveContainer" containerID="f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.673239 4800 scope.go:117] "RemoveContainer" containerID="28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.698355 4800 scope.go:117] "RemoveContainer" containerID="ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.738199 4800 scope.go:117] "RemoveContainer" containerID="1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.780541 4800 scope.go:117] "RemoveContainer" containerID="9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.798387 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80e4f44d-4647-4e15-a29f-2672fc065d82" path="/var/lib/kubelet/pods/80e4f44d-4647-4e15-a29f-2672fc065d82/volumes" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.817466 4800 scope.go:117] "RemoveContainer" containerID="fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.867807 4800 scope.go:117] "RemoveContainer" containerID="01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.892299 4800 scope.go:117] "RemoveContainer" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.893015 4800 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": container with ID starting with c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6 not found: ID does not exist" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.893072 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} err="failed to get container status \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": rpc error: code = NotFound desc = could not find container \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": container with ID starting with c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.893269 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.893800 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": container with ID starting with 6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415 not found: ID does not exist" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.893835 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} err="failed to get container status \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": rpc error: code = NotFound desc = could not find container \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": container with ID starting with 6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.893879 4800 scope.go:117] "RemoveContainer" containerID="959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.894352 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": container with ID starting with 959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451 not found: ID does not exist" containerID="959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.894390 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} err="failed to get container status \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": rpc error: code = NotFound desc = could not find container \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": container with ID starting with 959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.894417 4800 scope.go:117] "RemoveContainer" 
containerID="f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.894673 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": container with ID starting with f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae not found: ID does not exist" containerID="f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.894711 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} err="failed to get container status \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": rpc error: code = NotFound desc = could not find container \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": container with ID starting with f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.894735 4800 scope.go:117] "RemoveContainer" containerID="28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.895094 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": container with ID starting with 28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9 not found: ID does not exist" containerID="28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.895142 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} err="failed to get container status \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": rpc error: code = NotFound desc = could not find container \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": container with ID starting with 28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.895169 4800 scope.go:117] "RemoveContainer" containerID="ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.895591 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": container with ID starting with ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8 not found: ID does not exist" containerID="ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.895641 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} err="failed to get container status \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": rpc error: code = NotFound desc = could not find container \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": container with ID starting with 
ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.895668 4800 scope.go:117] "RemoveContainer" containerID="1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.895966 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": container with ID starting with 1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e not found: ID does not exist" containerID="1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896006 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} err="failed to get container status \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": rpc error: code = NotFound desc = could not find container \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": container with ID starting with 1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896032 4800 scope.go:117] "RemoveContainer" containerID="9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.896283 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": container with ID starting with 9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f not found: ID does not exist" containerID="9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896312 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} err="failed to get container status \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": rpc error: code = NotFound desc = could not find container \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": container with ID starting with 9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896324 4800 scope.go:117] "RemoveContainer" containerID="fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.896481 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": container with ID starting with fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac not found: ID does not exist" containerID="fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896502 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} err="failed to get container status \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": rpc 
error: code = NotFound desc = could not find container \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": container with ID starting with fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896514 4800 scope.go:117] "RemoveContainer" containerID="01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42" Nov 25 15:30:39 crc kubenswrapper[4800]: E1125 15:30:39.896683 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": container with ID starting with 01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42 not found: ID does not exist" containerID="01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896703 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} err="failed to get container status \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": rpc error: code = NotFound desc = could not find container \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": container with ID starting with 01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896718 4800 scope.go:117] "RemoveContainer" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896956 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} err="failed to get container status \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": rpc error: code = NotFound desc = could not find container \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": container with ID starting with c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.896977 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.897251 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} err="failed to get container status \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": rpc error: code = NotFound desc = could not find container \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": container with ID starting with 6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415 not found: ID does not exist" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.897281 4800 scope.go:117] "RemoveContainer" containerID="959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451" Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.897537 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} err="failed to get container status \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": rpc 
error: code = NotFound desc = could not find container \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": container with ID starting with 959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.897562 4800 scope.go:117] "RemoveContainer" containerID="f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.897764 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} err="failed to get container status \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": rpc error: code = NotFound desc = could not find container \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": container with ID starting with f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.897783 4800 scope.go:117] "RemoveContainer" containerID="28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898000 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} err="failed to get container status \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": rpc error: code = NotFound desc = could not find container \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": container with ID starting with 28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898025 4800 scope.go:117] "RemoveContainer" containerID="ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898211 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} err="failed to get container status \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": rpc error: code = NotFound desc = could not find container \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": container with ID starting with ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898227 4800 scope.go:117] "RemoveContainer" containerID="1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898430 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} err="failed to get container status \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": rpc error: code = NotFound desc = could not find container \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": container with ID starting with 1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898446 4800 scope.go:117] "RemoveContainer" containerID="9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898606 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} err="failed to get container status \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": rpc error: code = NotFound desc = could not find container \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": container with ID starting with 9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898625 4800 scope.go:117] "RemoveContainer" containerID="fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898817 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} err="failed to get container status \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": rpc error: code = NotFound desc = could not find container \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": container with ID starting with fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.898853 4800 scope.go:117] "RemoveContainer" containerID="01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.899168 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} err="failed to get container status \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": rpc error: code = NotFound desc = could not find container \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": container with ID starting with 01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.899187 4800 scope.go:117] "RemoveContainer" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.899381 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} err="failed to get container status \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": rpc error: code = NotFound desc = could not find container \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": container with ID starting with c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.899406 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.901654 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} err="failed to get container status \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": rpc error: code = NotFound desc = could not find container \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": container with ID starting with 6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.901685 4800 scope.go:117] "RemoveContainer" containerID="959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.902229 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} err="failed to get container status \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": rpc error: code = NotFound desc = could not find container \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": container with ID starting with 959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.902255 4800 scope.go:117] "RemoveContainer" containerID="f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.902573 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} err="failed to get container status \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": rpc error: code = NotFound desc = could not find container \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": container with ID starting with f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.902719 4800 scope.go:117] "RemoveContainer" containerID="28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.903921 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} err="failed to get container status \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": rpc error: code = NotFound desc = could not find container \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": container with ID starting with 28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.903957 4800 scope.go:117] "RemoveContainer" containerID="ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.904190 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} err="failed to get container status \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": rpc error: code = NotFound desc = could not find container \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": container with ID starting with ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.904218 4800 scope.go:117] "RemoveContainer" containerID="1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.904460 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} err="failed to get container status \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": rpc error: code = NotFound desc = could not find container \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": container with ID starting with 1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.904526 4800 scope.go:117] "RemoveContainer" containerID="9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.904727 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} err="failed to get container status \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": rpc error: code = NotFound desc = could not find container \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": container with ID starting with 9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.904746 4800 scope.go:117] "RemoveContainer" containerID="fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.905062 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} err="failed to get container status \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": rpc error: code = NotFound desc = could not find container \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": container with ID starting with fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.905177 4800 scope.go:117] "RemoveContainer" containerID="01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.906731 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} err="failed to get container status \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": rpc error: code = NotFound desc = could not find container \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": container with ID starting with 01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.906755 4800 scope.go:117] "RemoveContainer" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.907791 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} err="failed to get container status \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": rpc error: code = NotFound desc = could not find container \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": container with ID starting with c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.907930 4800 scope.go:117] "RemoveContainer" containerID="6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.909093 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415"} err="failed to get container status \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": rpc error: code = NotFound desc = could not find container \"6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415\": container with ID starting with 6cbacd4b9d037e2fc278c9388f19d1d86dcc5be52e9aa4abe7cd24bf4adac415 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.909123 4800 scope.go:117] "RemoveContainer" containerID="959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.909418 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451"} err="failed to get container status \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": rpc error: code = NotFound desc = could not find container \"959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451\": container with ID starting with 959d9afc930760f7a4ca82daaa2a4cddb26dac9183ebf051f9e3551092f0c451 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.909449 4800 scope.go:117] "RemoveContainer" containerID="f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.909896 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae"} err="failed to get container status \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": rpc error: code = NotFound desc = could not find container \"f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae\": container with ID starting with f97d5e6ec9c84812f2fa162aaba7eed082356b651cb7a2a85550c023955293ae not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.909915 4800 scope.go:117] "RemoveContainer" containerID="28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.910292 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9"} err="failed to get container status \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": rpc error: code = NotFound desc = could not find container \"28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9\": container with ID starting with 28f418ac682b66efe94ead15fecd1e57be2de2a2e81276a50900f529213f4bc9 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.910312 4800 scope.go:117] "RemoveContainer" containerID="ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.910553 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8"} err="failed to get container status \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": rpc error: code = NotFound desc = could not find container \"ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8\": container with ID starting with ff30ff719598fe327c808a7a11717b63f90d71c5573b292d8318f660051bb0f8 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.910573 4800 scope.go:117] "RemoveContainer" containerID="1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.910797 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e"} err="failed to get container status \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": rpc error: code = NotFound desc = could not find container \"1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e\": container with ID starting with 1dea1d753e75384c41aa3a9a4513356000198dd13a8e8e5cd200ed065395ac4e not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.910818 4800 scope.go:117] "RemoveContainer" containerID="9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.911049 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f"} err="failed to get container status \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": rpc error: code = NotFound desc = could not find container \"9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f\": container with ID starting with 9c15b29f9206e90d521e2faf1f8e5403f0852b3be91ad6bc1d614646e1ac8f6f not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.911066 4800 scope.go:117] "RemoveContainer" containerID="fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.911310 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac"} err="failed to get container status \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": rpc error: code = NotFound desc = could not find container \"fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac\": container with ID starting with fc271efd02a92f8f2dc4220e61ab03b093fb3b3097e72ddf9231d096841443ac not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.911327 4800 scope.go:117] "RemoveContainer" containerID="01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.911474 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42"} err="failed to get container status \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": rpc error: code = NotFound desc = could not find container \"01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42\": container with ID starting with 01733c86aa53d296a97e6f1dfc34ca14ccd60b7b2123a427679e9f3ae8a50b42 not found: ID does not exist"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.911492 4800 scope.go:117] "RemoveContainer" containerID="c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"
Nov 25 15:30:39 crc kubenswrapper[4800]: I1125 15:30:39.911643 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6"} err="failed to get container status \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": rpc error: code = NotFound desc = could not find container \"c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6\": container with ID starting with c6ce218b5061a7cb71ca3717c02e19eb0ff7b28774a381f18c344a5f6eecb7e6 not found: ID does not exist"
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.512671 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-nzxgf_0321f61a-9e40-47a2-b19f-a859fd6b890a/kube-multus/1.log"
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.513743 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-nzxgf_0321f61a-9e40-47a2-b19f-a859fd6b890a/kube-multus/0.log"
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.513879 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-nzxgf" event={"ID":"0321f61a-9e40-47a2-b19f-a859fd6b890a","Type":"ContainerStarted","Data":"4e48413a536c30c3a796f2c217b94b734ef979a45afa0c6a07a78a69f4831e0d"}
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.518377 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"644e2b60866b521dfe832fbc4888201caf97563c6b142263fb4f38059ce2a529"}
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.518435 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"001635dc387d081708d5713e83cc0ff6429b215ebf8eec55acc52563832f6875"}
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.518451 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"db31f9eecc47c25e929cb0e1e184e3cf4bdf125698f6548a055d6d033fd8c88e"}
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.518469 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"d79f069c25cf5278724cea7de8fdbd6dfa209f3fd08afea4fea547d61e610dc2"}
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.518487 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"05aad4987af0896555b6ddb641fa745f2d2fcae3dad751660210f6d52fe8a12b"}
Nov 25 15:30:40 crc kubenswrapper[4800]: I1125 15:30:40.518505 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"42a107d091aff30940e716f42d5b1fc8967661e848a65d9d8ab768239ac95997"}
Nov 25 15:30:42 crc kubenswrapper[4800]: I1125 15:30:42.534771 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"560b2ed20b45eefd24ba18dbee10c80b20d0c3f2c1b23829f30664d0c90fb569"}
Nov 25 15:30:42 crc kubenswrapper[4800]: I1125 15:30:42.639826 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 15:30:42 crc kubenswrapper[4800]: I1125 15:30:42.639939 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 15:30:43 crc kubenswrapper[4800]: I1125 15:30:43.220870 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-z579p"
Nov 25 15:30:45 crc kubenswrapper[4800]: I1125 15:30:45.557921 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" event={"ID":"9dac80ee-9d7a-4194-9834-f4b1e2915017","Type":"ContainerStarted","Data":"00e7b93a531f7d135f56749fda4e8d8e8f53b6799d266836c35938821c4b3ebd"}
Nov 25 15:30:45 crc kubenswrapper[4800]: I1125 15:30:45.558526 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:45 crc kubenswrapper[4800]: I1125 15:30:45.558547 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:45 crc kubenswrapper[4800]: I1125 15:30:45.588288 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:45 crc kubenswrapper[4800]: I1125 15:30:45.593001 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld" podStartSLOduration=7.592980189 podStartE2EDuration="7.592980189s" podCreationTimestamp="2025-11-25 15:30:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:30:45.589796335 +0000 UTC m=+806.644204817" watchObservedRunningTime="2025-11-25 15:30:45.592980189 +0000 UTC m=+806.647388661"
Nov 25 15:30:46 crc kubenswrapper[4800]: I1125 15:30:46.563407 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:30:46 crc kubenswrapper[4800]: I1125 15:30:46.592601 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:31:09 crc kubenswrapper[4800]: I1125 15:31:09.185599 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tsgld"
Nov 25 15:31:12 crc kubenswrapper[4800]: I1125 15:31:12.639956 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 15:31:12 crc kubenswrapper[4800]: I1125 15:31:12.640489 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 15:31:12 crc kubenswrapper[4800]: I1125 15:31:12.640537 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 15:31:12 crc kubenswrapper[4800]: I1125 15:31:12.641132 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4696a8bc43181471c595e8254afda59b2987f94a9cd2a837cdce4a6a707e3c00"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 15:31:12 crc kubenswrapper[4800]: I1125 15:31:12.641185 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://4696a8bc43181471c595e8254afda59b2987f94a9cd2a837cdce4a6a707e3c00" gracePeriod=600
Nov 25 15:31:13 crc kubenswrapper[4800]: I1125 15:31:13.774346 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="4696a8bc43181471c595e8254afda59b2987f94a9cd2a837cdce4a6a707e3c00" exitCode=0
Nov 25 15:31:13 crc kubenswrapper[4800]: I1125 15:31:13.774422 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"4696a8bc43181471c595e8254afda59b2987f94a9cd2a837cdce4a6a707e3c00"}
Nov 25 15:31:13 crc kubenswrapper[4800]: I1125 15:31:13.774834 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"1a9b7db7d78c7762803114dfba2c97d5027abe1ed7fd4f553dedba984708c24e"}
Nov 25 15:31:13 crc kubenswrapper[4800]: I1125 15:31:13.774905 4800 scope.go:117] "RemoveContainer" containerID="6935127ce0963b69f67a19a19c36aed432bd71e361cc6d229fd1e9b0d2ffa635"
Nov 25 15:31:21 crc kubenswrapper[4800]: I1125 15:31:21.373084 4800 scope.go:117] "RemoveContainer" containerID="7aaaaea0fdc33042707dadbb36f78d525da92bfe5e2275f45a55cc566c30d4e5"
Nov 25 15:31:21 crc kubenswrapper[4800]: I1125 15:31:21.823629 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-nzxgf_0321f61a-9e40-47a2-b19f-a859fd6b890a/kube-multus/1.log"
Nov 25 15:31:29 crc kubenswrapper[4800]: I1125 15:31:29.917587 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"]
Nov 25 15:31:29 crc kubenswrapper[4800]: I1125 15:31:29.919273 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:29 crc kubenswrapper[4800]: I1125 15:31:29.925501 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 25 15:31:29 crc kubenswrapper[4800]: I1125 15:31:29.933688 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"]
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.027330 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.027474 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.027509 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls6st\" (UniqueName: \"kubernetes.io/projected/435cdb5d-d3d7-4bd1-bda3-a6994c189210-kube-api-access-ls6st\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.128867 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.129388 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.129392 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.129495 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls6st\" (UniqueName: \"kubernetes.io/projected/435cdb5d-d3d7-4bd1-bda3-a6994c189210-kube-api-access-ls6st\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.129948 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.154526 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls6st\" (UniqueName: \"kubernetes.io/projected/435cdb5d-d3d7-4bd1-bda3-a6994c189210-kube-api-access-ls6st\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.235938 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.431996 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"]
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.872000 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7" event={"ID":"435cdb5d-d3d7-4bd1-bda3-a6994c189210","Type":"ContainerStarted","Data":"4947b31fd575a84860429c1c0ac53869a3f185d43d1c1796e3453ac6c43b3c70"}
Nov 25 15:31:30 crc kubenswrapper[4800]: I1125 15:31:30.872783 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7" event={"ID":"435cdb5d-d3d7-4bd1-bda3-a6994c189210","Type":"ContainerStarted","Data":"f812fd642198b17155191203ab2310ecf741041ca6608d571456e36c18cda572"}
Nov 25 15:31:31 crc kubenswrapper[4800]: I1125 15:31:31.879798 4800 generic.go:334] "Generic (PLEG): container finished" podID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerID="4947b31fd575a84860429c1c0ac53869a3f185d43d1c1796e3453ac6c43b3c70" exitCode=0
Nov 25 15:31:31 crc kubenswrapper[4800]: I1125 15:31:31.879914 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7" event={"ID":"435cdb5d-d3d7-4bd1-bda3-a6994c189210","Type":"ContainerDied","Data":"4947b31fd575a84860429c1c0ac53869a3f185d43d1c1796e3453ac6c43b3c70"}
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.000835 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4p22j"]
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.002575 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.007781 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4p22j"]
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.155763 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-utilities\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.156163 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-catalog-content\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.156197 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5v57\" (UniqueName: \"kubernetes.io/projected/218fd672-a425-47d7-b947-eb51e10bd5a8-kube-api-access-b5v57\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.256992 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-utilities\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.257068 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-catalog-content\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.257088 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5v57\" (UniqueName: \"kubernetes.io/projected/218fd672-a425-47d7-b947-eb51e10bd5a8-kube-api-access-b5v57\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.257649 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-utilities\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.257719 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-catalog-content\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.283915 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5v57\" (UniqueName: \"kubernetes.io/projected/218fd672-a425-47d7-b947-eb51e10bd5a8-kube-api-access-b5v57\") pod \"redhat-operators-4p22j\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") " pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.327403 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.563294 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4p22j"]
Nov 25 15:31:32 crc kubenswrapper[4800]: W1125 15:31:32.576993 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod218fd672_a425_47d7_b947_eb51e10bd5a8.slice/crio-8317f34e649bb52ac27890608857f4aa8304e58d120496b6a0304937370c7cb7 WatchSource:0}: Error finding container 8317f34e649bb52ac27890608857f4aa8304e58d120496b6a0304937370c7cb7: Status 404 returned error can't find the container with id 8317f34e649bb52ac27890608857f4aa8304e58d120496b6a0304937370c7cb7
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.886373 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p22j" event={"ID":"218fd672-a425-47d7-b947-eb51e10bd5a8","Type":"ContainerStarted","Data":"033fe464899f8f5cc8c8b5f9579bf0872bb499def5c50e2a87bb1f93186c0091"}
Nov 25 15:31:32 crc kubenswrapper[4800]: I1125 15:31:32.886421 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p22j" event={"ID":"218fd672-a425-47d7-b947-eb51e10bd5a8","Type":"ContainerStarted","Data":"8317f34e649bb52ac27890608857f4aa8304e58d120496b6a0304937370c7cb7"}
Nov 25 15:31:33 crc kubenswrapper[4800]: I1125 15:31:33.893621 4800 generic.go:334] "Generic (PLEG): container finished" podID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerID="033fe464899f8f5cc8c8b5f9579bf0872bb499def5c50e2a87bb1f93186c0091" exitCode=0
Nov 25 15:31:33 crc kubenswrapper[4800]: I1125 15:31:33.893731 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p22j" event={"ID":"218fd672-a425-47d7-b947-eb51e10bd5a8","Type":"ContainerDied","Data":"033fe464899f8f5cc8c8b5f9579bf0872bb499def5c50e2a87bb1f93186c0091"}
Nov 25 15:31:33 crc kubenswrapper[4800]: I1125 15:31:33.898695 4800 generic.go:334] "Generic (PLEG): container finished" podID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerID="8e0755e4d05c9c1af391bf270adf0b3e0c4348384e90240079bb1adf242801f1" exitCode=0
Nov 25 15:31:33 crc kubenswrapper[4800]: I1125 15:31:33.898727 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7" event={"ID":"435cdb5d-d3d7-4bd1-bda3-a6994c189210","Type":"ContainerDied","Data":"8e0755e4d05c9c1af391bf270adf0b3e0c4348384e90240079bb1adf242801f1"}
Nov 25 15:31:34 crc kubenswrapper[4800]: I1125 15:31:34.909810 4800 generic.go:334] "Generic (PLEG): container finished" podID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerID="3ea74102774d27bc7364562eb4c23a2f11f71905ffd28ac7cac98f6eee1c98b6" exitCode=0
Nov 25 15:31:34 crc kubenswrapper[4800]: I1125 15:31:34.909897 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7" event={"ID":"435cdb5d-d3d7-4bd1-bda3-a6994c189210","Type":"ContainerDied","Data":"3ea74102774d27bc7364562eb4c23a2f11f71905ffd28ac7cac98f6eee1c98b6"}
Nov 25 15:31:35 crc kubenswrapper[4800]: I1125 15:31:35.919381 4800 generic.go:334] "Generic (PLEG): container finished" podID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerID="dc4ff2214824df6828cd45eb4b3a384629ba821f15a6cef04fe8ee75a3d77580" exitCode=0
Nov 25 15:31:35 crc kubenswrapper[4800]: I1125 15:31:35.919492 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p22j" event={"ID":"218fd672-a425-47d7-b947-eb51e10bd5a8","Type":"ContainerDied","Data":"dc4ff2214824df6828cd45eb4b3a384629ba821f15a6cef04fe8ee75a3d77580"}
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.157921 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.321524 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-bundle\") pod \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") "
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.321764 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-util\") pod \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") "
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.321800 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls6st\" (UniqueName: \"kubernetes.io/projected/435cdb5d-d3d7-4bd1-bda3-a6994c189210-kube-api-access-ls6st\") pod \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\" (UID: \"435cdb5d-d3d7-4bd1-bda3-a6994c189210\") "
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.322815 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-bundle" (OuterVolumeSpecName: "bundle") pod "435cdb5d-d3d7-4bd1-bda3-a6994c189210" (UID: "435cdb5d-d3d7-4bd1-bda3-a6994c189210"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.329538 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/435cdb5d-d3d7-4bd1-bda3-a6994c189210-kube-api-access-ls6st" (OuterVolumeSpecName: "kube-api-access-ls6st") pod "435cdb5d-d3d7-4bd1-bda3-a6994c189210" (UID: "435cdb5d-d3d7-4bd1-bda3-a6994c189210"). InnerVolumeSpecName "kube-api-access-ls6st". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.424017 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls6st\" (UniqueName: \"kubernetes.io/projected/435cdb5d-d3d7-4bd1-bda3-a6994c189210-kube-api-access-ls6st\") on node \"crc\" DevicePath \"\""
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.424059 4800 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.515502 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-util" (OuterVolumeSpecName: "util") pod "435cdb5d-d3d7-4bd1-bda3-a6994c189210" (UID: "435cdb5d-d3d7-4bd1-bda3-a6994c189210"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.525515 4800 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/435cdb5d-d3d7-4bd1-bda3-a6994c189210-util\") on node \"crc\" DevicePath \"\""
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.929407 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7" event={"ID":"435cdb5d-d3d7-4bd1-bda3-a6994c189210","Type":"ContainerDied","Data":"f812fd642198b17155191203ab2310ecf741041ca6608d571456e36c18cda572"}
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.930447 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f812fd642198b17155191203ab2310ecf741041ca6608d571456e36c18cda572"
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.929461 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7"
Nov 25 15:31:36 crc kubenswrapper[4800]: I1125 15:31:36.934924 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p22j" event={"ID":"218fd672-a425-47d7-b947-eb51e10bd5a8","Type":"ContainerStarted","Data":"8ec3f4f5a75823a4f613eaba382edec4e61ffc0a9d0b2cdccc0dc36349d40c67"}
Nov 25 15:31:37 crc kubenswrapper[4800]: I1125 15:31:37.200281 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4p22j" podStartSLOduration=3.461460673 podStartE2EDuration="6.20022787s" podCreationTimestamp="2025-11-25 15:31:31 +0000 UTC" firstStartedPulling="2025-11-25 15:31:33.897205024 +0000 UTC m=+854.951613526" lastFinishedPulling="2025-11-25 15:31:36.635972241 +0000 UTC m=+857.690380723" observedRunningTime="2025-11-25 15:31:36.956184641 +0000 UTC m=+858.010593143" watchObservedRunningTime="2025-11-25 15:31:37.20022787 +0000 UTC m=+858.254636352"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.085094 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"]
Nov 25 15:31:41 crc kubenswrapper[4800]: E1125 15:31:41.086024 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerName="extract"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.086043 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerName="extract"
Nov 25 15:31:41 crc kubenswrapper[4800]: E1125 15:31:41.086057 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerName="pull"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.086068 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerName="pull"
Nov 25 15:31:41 crc kubenswrapper[4800]: E1125 15:31:41.086086 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerName="util"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.086095 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerName="util"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.086246 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="435cdb5d-d3d7-4bd1-bda3-a6994c189210" containerName="extract"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.086894 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.090263 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-t59pv"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.090320 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.092392 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.094356 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbhcs\" (UniqueName: \"kubernetes.io/projected/b0d67bab-8969-4b12-a7f1-e37e02e45afa-kube-api-access-mbhcs\") pod \"nmstate-operator-557fdffb88-cmjwd\" (UID: \"b0d67bab-8969-4b12-a7f1-e37e02e45afa\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.095777 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"]
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.195864 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbhcs\" (UniqueName: \"kubernetes.io/projected/b0d67bab-8969-4b12-a7f1-e37e02e45afa-kube-api-access-mbhcs\") pod \"nmstate-operator-557fdffb88-cmjwd\" (UID: \"b0d67bab-8969-4b12-a7f1-e37e02e45afa\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.219051 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbhcs\" (UniqueName: \"kubernetes.io/projected/b0d67bab-8969-4b12-a7f1-e37e02e45afa-kube-api-access-mbhcs\") pod \"nmstate-operator-557fdffb88-cmjwd\" (UID: \"b0d67bab-8969-4b12-a7f1-e37e02e45afa\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.408134 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.836636 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cmjwd"]
Nov 25 15:31:41 crc kubenswrapper[4800]: I1125 15:31:41.986959 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd" event={"ID":"b0d67bab-8969-4b12-a7f1-e37e02e45afa","Type":"ContainerStarted","Data":"2afc91819264279b212ed6f64ae99702c09588f21cbbc10ccf14a6f148209c16"}
Nov 25 15:31:42 crc kubenswrapper[4800]: I1125 15:31:42.328482 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:42 crc kubenswrapper[4800]: I1125 15:31:42.328774 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:42 crc kubenswrapper[4800]: I1125 15:31:42.391356 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:43 crc kubenswrapper[4800]: I1125 15:31:43.034555 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:44 crc kubenswrapper[4800]: I1125 15:31:44.977448 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4p22j"]
Nov 25 15:31:46 crc kubenswrapper[4800]: I1125 15:31:46.014817 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4p22j" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="registry-server" containerID="cri-o://8ec3f4f5a75823a4f613eaba382edec4e61ffc0a9d0b2cdccc0dc36349d40c67" gracePeriod=2
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.025227 4800 generic.go:334] "Generic (PLEG): container finished" podID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerID="8ec3f4f5a75823a4f613eaba382edec4e61ffc0a9d0b2cdccc0dc36349d40c67" exitCode=0
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.025290 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p22j" event={"ID":"218fd672-a425-47d7-b947-eb51e10bd5a8","Type":"ContainerDied","Data":"8ec3f4f5a75823a4f613eaba382edec4e61ffc0a9d0b2cdccc0dc36349d40c67"}
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.381986 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.432174 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5v57\" (UniqueName: \"kubernetes.io/projected/218fd672-a425-47d7-b947-eb51e10bd5a8-kube-api-access-b5v57\") pod \"218fd672-a425-47d7-b947-eb51e10bd5a8\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") "
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.432254 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-utilities\") pod \"218fd672-a425-47d7-b947-eb51e10bd5a8\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") "
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.432318 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-catalog-content\") pod \"218fd672-a425-47d7-b947-eb51e10bd5a8\" (UID: \"218fd672-a425-47d7-b947-eb51e10bd5a8\") "
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.433689 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-utilities" (OuterVolumeSpecName: "utilities") pod "218fd672-a425-47d7-b947-eb51e10bd5a8" (UID: "218fd672-a425-47d7-b947-eb51e10bd5a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.442223 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/218fd672-a425-47d7-b947-eb51e10bd5a8-kube-api-access-b5v57" (OuterVolumeSpecName: "kube-api-access-b5v57") pod "218fd672-a425-47d7-b947-eb51e10bd5a8" (UID: "218fd672-a425-47d7-b947-eb51e10bd5a8"). InnerVolumeSpecName "kube-api-access-b5v57". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.533021 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "218fd672-a425-47d7-b947-eb51e10bd5a8" (UID: "218fd672-a425-47d7-b947-eb51e10bd5a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.534226 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5v57\" (UniqueName: \"kubernetes.io/projected/218fd672-a425-47d7-b947-eb51e10bd5a8-kube-api-access-b5v57\") on node \"crc\" DevicePath \"\""
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.534257 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 15:31:47 crc kubenswrapper[4800]: I1125 15:31:47.534268 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fd672-a425-47d7-b947-eb51e10bd5a8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.035821 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p22j"
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.035812 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p22j" event={"ID":"218fd672-a425-47d7-b947-eb51e10bd5a8","Type":"ContainerDied","Data":"8317f34e649bb52ac27890608857f4aa8304e58d120496b6a0304937370c7cb7"}
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.036525 4800 scope.go:117] "RemoveContainer" containerID="8ec3f4f5a75823a4f613eaba382edec4e61ffc0a9d0b2cdccc0dc36349d40c67"
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.038790 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd" event={"ID":"b0d67bab-8969-4b12-a7f1-e37e02e45afa","Type":"ContainerStarted","Data":"92685cd91693591eff4afe82403e45803d804c15829b1023d0eab078a6e5a143"}
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.056864 4800 scope.go:117] "RemoveContainer" containerID="dc4ff2214824df6828cd45eb4b3a384629ba821f15a6cef04fe8ee75a3d77580"
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.063449 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-cmjwd" podStartSLOduration=1.69125378 podStartE2EDuration="7.063420006s" podCreationTimestamp="2025-11-25 15:31:41 +0000 UTC" firstStartedPulling="2025-11-25 15:31:41.846441928 +0000 UTC m=+862.900850410" lastFinishedPulling="2025-11-25 15:31:47.218608154 +0000 UTC m=+868.273016636" observedRunningTime="2025-11-25 15:31:48.061811353 +0000 UTC m=+869.116219835" watchObservedRunningTime="2025-11-25 15:31:48.063420006 +0000 UTC m=+869.117828488"
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.086904 4800 scope.go:117] "RemoveContainer" containerID="033fe464899f8f5cc8c8b5f9579bf0872bb499def5c50e2a87bb1f93186c0091"
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.090948 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4p22j"]
Nov 25 15:31:48 crc kubenswrapper[4800]: I1125 15:31:48.095321 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4p22j"]
Nov 25 15:31:49 crc kubenswrapper[4800]: I1125 15:31:49.793320 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" path="/var/lib/kubelet/pods/218fd672-a425-47d7-b947-eb51e10bd5a8/volumes"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.274292 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm"]
Nov 25 15:31:50 crc kubenswrapper[4800]: E1125 15:31:50.274501 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="extract-utilities"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.274513 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="extract-utilities"
Nov 25 15:31:50 crc kubenswrapper[4800]: E1125 15:31:50.274524 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="registry-server"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.274532 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="registry-server"
Nov 25 15:31:50 crc kubenswrapper[4800]: E1125 15:31:50.274555 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="extract-content"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.274568 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="extract-content"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.274691 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="218fd672-a425-47d7-b947-eb51e10bd5a8" containerName="registry-server"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.275404 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.279826 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-vctbh"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.290047 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"]
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.290753 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.294253 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.298707 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm"]
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.314272 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-gbvvs"]
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.315154 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-gbvvs"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.317725 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"]
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.371748 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-dbus-socket\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.371811 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-nmstate-lock\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.371855 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/ee71ff08-0d60-4527-8892-b804feba7c02-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-629l4\" (UID: \"ee71ff08-0d60-4527-8892-b804feba7c02\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.372077 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74gqc\" (UniqueName: \"kubernetes.io/projected/b4293598-b73e-407c-a146-3dcc03673ff6-kube-api-access-74gqc\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.372336 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-ovs-socket\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.372472 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dlkg\" (UniqueName: \"kubernetes.io/projected/ee71ff08-0d60-4527-8892-b804feba7c02-kube-api-access-8dlkg\") pod \"nmstate-webhook-6b89b748d8-629l4\" (UID: \"ee71ff08-0d60-4527-8892-b804feba7c02\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.372518 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvmlz\" (UniqueName: \"kubernetes.io/projected/6120d1d1-7abc-494b-8e0f-da1ac9b5324f-kube-api-access-fvmlz\") pod \"nmstate-metrics-5dcf9c57c5-fqbmm\" (UID: \"6120d1d1-7abc-494b-8e0f-da1ac9b5324f\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.420893 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4"]
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.438516 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.441690 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.442016 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-nmb4h" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.442514 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.468673 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4"] Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.473376 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-dbus-socket\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.473599 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d4b604f-f606-4565-9094-d61a6c3275f1-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.473717 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-nmstate-lock\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.473821 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/ee71ff08-0d60-4527-8892-b804feba7c02-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-629l4\" (UID: \"ee71ff08-0d60-4527-8892-b804feba7c02\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.473966 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/3d4b604f-f606-4565-9094-d61a6c3275f1-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.474080 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74gqc\" (UniqueName: \"kubernetes.io/projected/b4293598-b73e-407c-a146-3dcc03673ff6-kube-api-access-74gqc\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.473879 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-dbus-socket\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " 
pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.473815 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-nmstate-lock\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: E1125 15:31:50.473997 4800 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.474397 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q64mg\" (UniqueName: \"kubernetes.io/projected/3d4b604f-f606-4565-9094-d61a6c3275f1-kube-api-access-q64mg\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:50 crc kubenswrapper[4800]: E1125 15:31:50.474478 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ee71ff08-0d60-4527-8892-b804feba7c02-tls-key-pair podName:ee71ff08-0d60-4527-8892-b804feba7c02 nodeName:}" failed. No retries permitted until 2025-11-25 15:31:50.974395166 +0000 UTC m=+872.028803648 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/ee71ff08-0d60-4527-8892-b804feba7c02-tls-key-pair") pod "nmstate-webhook-6b89b748d8-629l4" (UID: "ee71ff08-0d60-4527-8892-b804feba7c02") : secret "openshift-nmstate-webhook" not found Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.474668 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-ovs-socket\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.474749 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dlkg\" (UniqueName: \"kubernetes.io/projected/ee71ff08-0d60-4527-8892-b804feba7c02-kube-api-access-8dlkg\") pod \"nmstate-webhook-6b89b748d8-629l4\" (UID: \"ee71ff08-0d60-4527-8892-b804feba7c02\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.474778 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvmlz\" (UniqueName: \"kubernetes.io/projected/6120d1d1-7abc-494b-8e0f-da1ac9b5324f-kube-api-access-fvmlz\") pod \"nmstate-metrics-5dcf9c57c5-fqbmm\" (UID: \"6120d1d1-7abc-494b-8e0f-da1ac9b5324f\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.474805 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b4293598-b73e-407c-a146-3dcc03673ff6-ovs-socket\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.493706 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvmlz\" (UniqueName: 
\"kubernetes.io/projected/6120d1d1-7abc-494b-8e0f-da1ac9b5324f-kube-api-access-fvmlz\") pod \"nmstate-metrics-5dcf9c57c5-fqbmm\" (UID: \"6120d1d1-7abc-494b-8e0f-da1ac9b5324f\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.496354 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dlkg\" (UniqueName: \"kubernetes.io/projected/ee71ff08-0d60-4527-8892-b804feba7c02-kube-api-access-8dlkg\") pod \"nmstate-webhook-6b89b748d8-629l4\" (UID: \"ee71ff08-0d60-4527-8892-b804feba7c02\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.498305 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74gqc\" (UniqueName: \"kubernetes.io/projected/b4293598-b73e-407c-a146-3dcc03673ff6-kube-api-access-74gqc\") pod \"nmstate-handler-gbvvs\" (UID: \"b4293598-b73e-407c-a146-3dcc03673ff6\") " pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.576373 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d4b604f-f606-4565-9094-d61a6c3275f1-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.576469 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/3d4b604f-f606-4565-9094-d61a6c3275f1-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.576514 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q64mg\" (UniqueName: \"kubernetes.io/projected/3d4b604f-f606-4565-9094-d61a6c3275f1-kube-api-access-q64mg\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:50 crc kubenswrapper[4800]: E1125 15:31:50.576604 4800 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 25 15:31:50 crc kubenswrapper[4800]: E1125 15:31:50.576689 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3d4b604f-f606-4565-9094-d61a6c3275f1-plugin-serving-cert podName:3d4b604f-f606-4565-9094-d61a6c3275f1 nodeName:}" failed. No retries permitted until 2025-11-25 15:31:51.076669367 +0000 UTC m=+872.131077859 (durationBeforeRetry 500ms). 
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.577759 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/3d4b604f-f606-4565-9094-d61a6c3275f1-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.591413 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.598008 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q64mg\" (UniqueName: \"kubernetes.io/projected/3d4b604f-f606-4565-9094-d61a6c3275f1-kube-api-access-q64mg\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.607147 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-589846d5d7-qkl99"]
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.608362 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-589846d5d7-qkl99"
Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.628271 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-gbvvs" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.638414 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-589846d5d7-qkl99"] Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.677555 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-oauth-config\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.677634 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-config\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.677668 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-service-ca\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.677741 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-trusted-ca-bundle\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.677765 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-oauth-serving-cert\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.678149 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-serving-cert\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.678198 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f8vz\" (UniqueName: \"kubernetes.io/projected/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-kube-api-access-6f8vz\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: W1125 15:31:50.679877 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4293598_b73e_407c_a146_3dcc03673ff6.slice/crio-5c87f516e5a4efab4086660cf6032c092e1fcdad941fe7f76256953443d71727 WatchSource:0}: Error finding container 5c87f516e5a4efab4086660cf6032c092e1fcdad941fe7f76256953443d71727: Status 
404 returned error can't find the container with id 5c87f516e5a4efab4086660cf6032c092e1fcdad941fe7f76256953443d71727 Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.779024 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-serving-cert\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.779412 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f8vz\" (UniqueName: \"kubernetes.io/projected/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-kube-api-access-6f8vz\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.779435 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-oauth-config\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.779467 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-config\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.779486 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-service-ca\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.779539 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-trusted-ca-bundle\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.779557 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-oauth-serving-cert\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.780343 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-oauth-serving-cert\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.784387 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-serving-cert\") pod 
\"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.786743 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-service-ca\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.787361 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-config\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.788561 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-console-oauth-config\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.791346 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-trusted-ca-bundle\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.804180 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f8vz\" (UniqueName: \"kubernetes.io/projected/9be08821-d73b-4dc7-bf5b-b0dcd35f24b5-kube-api-access-6f8vz\") pod \"console-589846d5d7-qkl99\" (UID: \"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5\") " pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.980055 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-589846d5d7-qkl99" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.981530 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/ee71ff08-0d60-4527-8892-b804feba7c02-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-629l4\" (UID: \"ee71ff08-0d60-4527-8892-b804feba7c02\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" Nov 25 15:31:50 crc kubenswrapper[4800]: I1125 15:31:50.987000 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/ee71ff08-0d60-4527-8892-b804feba7c02-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-629l4\" (UID: \"ee71ff08-0d60-4527-8892-b804feba7c02\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.014785 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm"] Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.072491 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm" event={"ID":"6120d1d1-7abc-494b-8e0f-da1ac9b5324f","Type":"ContainerStarted","Data":"609b044c4c596b3575670fe077e9e9f79de9942a44b53ca4ada9290fb6c58cb3"} Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.073683 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gbvvs" event={"ID":"b4293598-b73e-407c-a146-3dcc03673ff6","Type":"ContainerStarted","Data":"5c87f516e5a4efab4086660cf6032c092e1fcdad941fe7f76256953443d71727"} Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.083456 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d4b604f-f606-4565-9094-d61a6c3275f1-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.088378 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d4b604f-f606-4565-9094-d61a6c3275f1-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-krpt4\" (UID: \"3d4b604f-f606-4565-9094-d61a6c3275f1\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.165886 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-589846d5d7-qkl99"] Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.204073 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.374230 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.419057 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"] Nov 25 15:31:51 crc kubenswrapper[4800]: W1125 15:31:51.443324 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee71ff08_0d60_4527_8892_b804feba7c02.slice/crio-33a56d7cc2212afc6fe2b7ea6fb29f6b7dfb65a2e06d4be85817821d84457a64 WatchSource:0}: Error finding container 33a56d7cc2212afc6fe2b7ea6fb29f6b7dfb65a2e06d4be85817821d84457a64: Status 404 returned error can't find the container with id 33a56d7cc2212afc6fe2b7ea6fb29f6b7dfb65a2e06d4be85817821d84457a64 Nov 25 15:31:51 crc kubenswrapper[4800]: I1125 15:31:51.605660 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4"] Nov 25 15:31:52 crc kubenswrapper[4800]: I1125 15:31:52.093296 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" event={"ID":"3d4b604f-f606-4565-9094-d61a6c3275f1","Type":"ContainerStarted","Data":"0d3c418c91000c37bde1a39c6e170d695f69e68699532a8488603b4e3913cb4e"} Nov 25 15:31:52 crc kubenswrapper[4800]: I1125 15:31:52.094781 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" event={"ID":"ee71ff08-0d60-4527-8892-b804feba7c02","Type":"ContainerStarted","Data":"33a56d7cc2212afc6fe2b7ea6fb29f6b7dfb65a2e06d4be85817821d84457a64"} Nov 25 15:31:52 crc kubenswrapper[4800]: I1125 15:31:52.096716 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-589846d5d7-qkl99" event={"ID":"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5","Type":"ContainerStarted","Data":"9299f2c14c5b315a0da404d2d73aef2495efec3d48b3c5833b045c0d8823aa40"} Nov 25 15:31:52 crc kubenswrapper[4800]: I1125 15:31:52.096764 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-589846d5d7-qkl99" event={"ID":"9be08821-d73b-4dc7-bf5b-b0dcd35f24b5","Type":"ContainerStarted","Data":"2f2fd08afad1f71019c6c126e35684b48dfea16b6aa61f340c7e008f41a07dad"} Nov 25 15:31:52 crc kubenswrapper[4800]: I1125 15:31:52.119699 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-589846d5d7-qkl99" podStartSLOduration=2.119673644 podStartE2EDuration="2.119673644s" podCreationTimestamp="2025-11-25 15:31:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:31:52.117162705 +0000 UTC m=+873.171571187" watchObservedRunningTime="2025-11-25 15:31:52.119673644 +0000 UTC m=+873.174082126" Nov 25 15:31:54 crc kubenswrapper[4800]: I1125 15:31:54.117315 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm" event={"ID":"6120d1d1-7abc-494b-8e0f-da1ac9b5324f","Type":"ContainerStarted","Data":"dd2ed60ae913318e66ab282a107c1433850cb1326749139ada1895061724b787"} Nov 25 15:31:54 crc kubenswrapper[4800]: I1125 15:31:54.119795 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" event={"ID":"ee71ff08-0d60-4527-8892-b804feba7c02","Type":"ContainerStarted","Data":"f812530e16ab6e69ec0541421037690c7d12e2fbb866f192338472b3e8719694"} Nov 25 15:31:54 crc kubenswrapper[4800]: 
I1125 15:31:54.119924 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"
Nov 25 15:31:54 crc kubenswrapper[4800]: I1125 15:31:54.122065 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gbvvs" event={"ID":"b4293598-b73e-407c-a146-3dcc03673ff6","Type":"ContainerStarted","Data":"ed92138dcc84af53ca0f7cd92bf8ca181b92576deb7aae8055dee5467d2d32d6"}
Nov 25 15:31:54 crc kubenswrapper[4800]: I1125 15:31:54.122248 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-gbvvs"
Nov 25 15:31:54 crc kubenswrapper[4800]: I1125 15:31:54.140675 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4" podStartSLOduration=2.018378787 podStartE2EDuration="4.140642862s" podCreationTimestamp="2025-11-25 15:31:50 +0000 UTC" firstStartedPulling="2025-11-25 15:31:51.447559969 +0000 UTC m=+872.501968451" lastFinishedPulling="2025-11-25 15:31:53.569824044 +0000 UTC m=+874.624232526" observedRunningTime="2025-11-25 15:31:54.135906654 +0000 UTC m=+875.190315136" watchObservedRunningTime="2025-11-25 15:31:54.140642862 +0000 UTC m=+875.195051344"
Nov 25 15:31:54 crc kubenswrapper[4800]: I1125 15:31:54.154816 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-gbvvs" podStartSLOduration=1.285598914 podStartE2EDuration="4.154763647s" podCreationTimestamp="2025-11-25 15:31:50 +0000 UTC" firstStartedPulling="2025-11-25 15:31:50.682562348 +0000 UTC m=+871.736970830" lastFinishedPulling="2025-11-25 15:31:53.551727081 +0000 UTC m=+874.606135563" observedRunningTime="2025-11-25 15:31:54.151321963 +0000 UTC m=+875.205730445" watchObservedRunningTime="2025-11-25 15:31:54.154763647 +0000 UTC m=+875.209172129"
Nov 25 15:31:55 crc kubenswrapper[4800]: I1125 15:31:55.132053 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" event={"ID":"3d4b604f-f606-4565-9094-d61a6c3275f1","Type":"ContainerStarted","Data":"26c9eaeca69e33c0a22e447521abf4858ca75b903516bfd40c063238e5ef25d7"}
Nov 25 15:31:55 crc kubenswrapper[4800]: I1125 15:31:55.152998 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-krpt4" podStartSLOduration=2.217182377 podStartE2EDuration="5.152975653s" podCreationTimestamp="2025-11-25 15:31:50 +0000 UTC" firstStartedPulling="2025-11-25 15:31:51.617707648 +0000 UTC m=+872.672116130" lastFinishedPulling="2025-11-25 15:31:54.553500924 +0000 UTC m=+875.607909406" observedRunningTime="2025-11-25 15:31:55.14774577 +0000 UTC m=+876.202154252" watchObservedRunningTime="2025-11-25 15:31:55.152975653 +0000 UTC m=+876.207384135"
Nov 25 15:31:57 crc kubenswrapper[4800]: I1125 15:31:57.144972 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm" event={"ID":"6120d1d1-7abc-494b-8e0f-da1ac9b5324f","Type":"ContainerStarted","Data":"55f4237512be765a63baab8230c79528052d66e543f35a6622c6b01f42a24163"}
Nov 25 15:31:57 crc kubenswrapper[4800]: I1125 15:31:57.160572 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fqbmm" podStartSLOduration=1.889680189 podStartE2EDuration="7.160546869s" podCreationTimestamp="2025-11-25 15:31:50 +0000 UTC" firstStartedPulling="2025-11-25 15:31:51.021011166 +0000 UTC m=+872.075419648" lastFinishedPulling="2025-11-25 15:31:56.291877846 +0000 UTC m=+877.346286328" observedRunningTime="2025-11-25 15:31:57.157966949 +0000 UTC m=+878.212375431" watchObservedRunningTime="2025-11-25 15:31:57.160546869 +0000 UTC m=+878.214955351"
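
The "Observed pod startup duration" records above are internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling) from that. For the webhook pod: 4.140642862s - (15:31:53.569824044 - 15:31:51.447559969) = 2.018378787s. A short Go sketch reproducing the arithmetic (hypothetical helper, not kubelet's implementation):

package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	// Layout matches the timestamps printed in the records above.
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Values from the nmstate-webhook-6b89b748d8-629l4 record above.
	created := mustParse("2025-11-25 15:31:50 +0000 UTC")
	firstPull := mustParse("2025-11-25 15:31:51.447559969 +0000 UTC")
	lastPull := mustParse("2025-11-25 15:31:53.569824044 +0000 UTC")
	watchRunning := mustParse("2025-11-25 15:31:54.140642862 +0000 UTC")

	e2e := watchRunning.Sub(created)     // podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration, pull time excluded
	fmt.Println(e2e, slo)                // 4.140642862s 2.018378787s
}
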
Nov 25 15:32:00 crc kubenswrapper[4800]: I1125 15:32:00.650217 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-gbvvs"
Nov 25 15:32:00 crc kubenswrapper[4800]: I1125 15:32:00.980724 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-589846d5d7-qkl99"
Nov 25 15:32:00 crc kubenswrapper[4800]: I1125 15:32:00.980794 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-589846d5d7-qkl99"
Nov 25 15:32:00 crc kubenswrapper[4800]: I1125 15:32:00.986340 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-589846d5d7-qkl99"
Nov 25 15:32:01 crc kubenswrapper[4800]: I1125 15:32:01.171529 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-589846d5d7-qkl99"
Nov 25 15:32:01 crc kubenswrapper[4800]: I1125 15:32:01.282260 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-sx8kw"]
Nov 25 15:32:11 crc kubenswrapper[4800]: I1125 15:32:11.210256 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-629l4"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.516233 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2fwvl"]
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.527666 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2fwvl"
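
The console probe sequence above is the expected order for a slow-starting container: readiness results are not acted on until the startup probe succeeds, so console-589846d5d7-qkl99 goes startup=unhealthy, then startup=started, then readiness=ready; only after it reports ready does the API delete the old console-f9d7485db-sx8kw (SyncLoop DELETE at 15:32:01.282; the actual kill with gracePeriod=15 appears further below). A toy Go model of that gating, not kubelet's implementation:

package main

import "fmt"

// container tracks the two probe outcomes that matter for ordering:
// readiness results only take effect once the startup probe has passed.
type container struct{ started, ready bool }

func (c *container) observe(probe, status string) {
	switch probe {
	case "startup":
		c.started = status == "started"
	case "readiness":
		if !c.started {
			fmt.Printf("probe=%s status=%q ignored until startup succeeds\n", probe, status)
			return
		}
		c.ready = status == "ready"
	}
	fmt.Printf("probe=%s status=%q -> started=%v ready=%v\n", probe, status, c.started, c.ready)
}

func main() {
	// The console-589846d5d7-qkl99 sequence from the records above.
	c := &container{}
	c.observe("readiness", "")
	c.observe("startup", "unhealthy")
	c.observe("startup", "started")
	c.observe("readiness", "ready")
}
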
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.543036 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2fwvl"]
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.599164 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2bv8\" (UniqueName: \"kubernetes.io/projected/28578f18-346a-4afa-b73d-45b7faee6330-kube-api-access-r2bv8\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.599207 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28578f18-346a-4afa-b73d-45b7faee6330-catalog-content\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.599245 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28578f18-346a-4afa-b73d-45b7faee6330-utilities\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.700118 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2bv8\" (UniqueName: \"kubernetes.io/projected/28578f18-346a-4afa-b73d-45b7faee6330-kube-api-access-r2bv8\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.700192 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28578f18-346a-4afa-b73d-45b7faee6330-catalog-content\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.700227 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28578f18-346a-4afa-b73d-45b7faee6330-utilities\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.701086 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28578f18-346a-4afa-b73d-45b7faee6330-utilities\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.701141 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28578f18-346a-4afa-b73d-45b7faee6330-catalog-content\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl"
Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.718303 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-r2bv8\" (UniqueName: \"kubernetes.io/projected/28578f18-346a-4afa-b73d-45b7faee6330-kube-api-access-r2bv8\") pod \"community-operators-2fwvl\" (UID: \"28578f18-346a-4afa-b73d-45b7faee6330\") " pod="openshift-marketplace/community-operators-2fwvl" Nov 25 15:32:18 crc kubenswrapper[4800]: I1125 15:32:18.849713 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2fwvl" Nov 25 15:32:19 crc kubenswrapper[4800]: I1125 15:32:19.180801 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2fwvl"] Nov 25 15:32:19 crc kubenswrapper[4800]: I1125 15:32:19.283421 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fwvl" event={"ID":"28578f18-346a-4afa-b73d-45b7faee6330","Type":"ContainerStarted","Data":"8cfbd7a300c51732c7d460b607f4c36b7e1a617879eea00330d4c925107acd07"} Nov 25 15:32:20 crc kubenswrapper[4800]: I1125 15:32:20.290787 4800 generic.go:334] "Generic (PLEG): container finished" podID="28578f18-346a-4afa-b73d-45b7faee6330" containerID="63e7632122c764959cbc37b40c0b1264d9041e6dacff49e30121091e8e9f201a" exitCode=0 Nov 25 15:32:20 crc kubenswrapper[4800]: I1125 15:32:20.290862 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fwvl" event={"ID":"28578f18-346a-4afa-b73d-45b7faee6330","Type":"ContainerDied","Data":"63e7632122c764959cbc37b40c0b1264d9041e6dacff49e30121091e8e9f201a"} Nov 25 15:32:23 crc kubenswrapper[4800]: I1125 15:32:23.926625 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt"] Nov 25 15:32:23 crc kubenswrapper[4800]: I1125 15:32:23.929410 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:23 crc kubenswrapper[4800]: I1125 15:32:23.932314 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 15:32:23 crc kubenswrapper[4800]: I1125 15:32:23.936632 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt"] Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.071317 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.071411 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.071656 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wnfr\" (UniqueName: \"kubernetes.io/projected/e59677c5-d37c-41e3-a083-2102f5e79f5d-kube-api-access-2wnfr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.172670 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.172741 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wnfr\" (UniqueName: \"kubernetes.io/projected/e59677c5-d37c-41e3-a083-2102f5e79f5d-kube-api-access-2wnfr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.172794 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.173294 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.173405 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.194266 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wnfr\" (UniqueName: \"kubernetes.io/projected/e59677c5-d37c-41e3-a083-2102f5e79f5d-kube-api-access-2wnfr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:24 crc kubenswrapper[4800]: I1125 15:32:24.245394 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:25 crc kubenswrapper[4800]: I1125 15:32:25.065215 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt"] Nov 25 15:32:25 crc kubenswrapper[4800]: W1125 15:32:25.079796 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode59677c5_d37c_41e3_a083_2102f5e79f5d.slice/crio-ed9a447086e6591739ce327ac10cac24b42a23ee16e22ab708b02e801cbd1001 WatchSource:0}: Error finding container ed9a447086e6591739ce327ac10cac24b42a23ee16e22ab708b02e801cbd1001: Status 404 returned error can't find the container with id ed9a447086e6591739ce327ac10cac24b42a23ee16e22ab708b02e801cbd1001 Nov 25 15:32:25 crc kubenswrapper[4800]: I1125 15:32:25.319550 4800 generic.go:334] "Generic (PLEG): container finished" podID="28578f18-346a-4afa-b73d-45b7faee6330" containerID="6e1b49ccc513dfbb03490bcb36332b5772694e32dbbe62c6ecb266136850499e" exitCode=0 Nov 25 15:32:25 crc kubenswrapper[4800]: I1125 15:32:25.319647 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fwvl" event={"ID":"28578f18-346a-4afa-b73d-45b7faee6330","Type":"ContainerDied","Data":"6e1b49ccc513dfbb03490bcb36332b5772694e32dbbe62c6ecb266136850499e"} Nov 25 15:32:25 crc kubenswrapper[4800]: I1125 15:32:25.321018 4800 generic.go:334] "Generic (PLEG): container finished" podID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerID="69df852bdfcb23bc72b2722d246af753cfdc0b399cba96c19d58c7754bd28829" exitCode=0 Nov 25 15:32:25 crc kubenswrapper[4800]: I1125 15:32:25.321044 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" event={"ID":"e59677c5-d37c-41e3-a083-2102f5e79f5d","Type":"ContainerDied","Data":"69df852bdfcb23bc72b2722d246af753cfdc0b399cba96c19d58c7754bd28829"} Nov 25 15:32:25 crc kubenswrapper[4800]: I1125 15:32:25.321064 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" event={"ID":"e59677c5-d37c-41e3-a083-2102f5e79f5d","Type":"ContainerStarted","Data":"ed9a447086e6591739ce327ac10cac24b42a23ee16e22ab708b02e801cbd1001"} Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.332355 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-sx8kw" podUID="81f88e63-467c-4356-bb2b-b5aa9d93f512" containerName="console" containerID="cri-o://c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527" gracePeriod=15 Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.744920 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-sx8kw_81f88e63-467c-4356-bb2b-b5aa9d93f512/console/0.log" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.745460 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.809380 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-trusted-ca-bundle\") pod \"81f88e63-467c-4356-bb2b-b5aa9d93f512\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.809612 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-serving-cert\") pod \"81f88e63-467c-4356-bb2b-b5aa9d93f512\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.809655 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pxjv\" (UniqueName: \"kubernetes.io/projected/81f88e63-467c-4356-bb2b-b5aa9d93f512-kube-api-access-9pxjv\") pod \"81f88e63-467c-4356-bb2b-b5aa9d93f512\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.809683 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-config\") pod \"81f88e63-467c-4356-bb2b-b5aa9d93f512\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.809721 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-oauth-config\") pod \"81f88e63-467c-4356-bb2b-b5aa9d93f512\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.809767 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-oauth-serving-cert\") pod \"81f88e63-467c-4356-bb2b-b5aa9d93f512\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.809801 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-service-ca\") pod \"81f88e63-467c-4356-bb2b-b5aa9d93f512\" (UID: \"81f88e63-467c-4356-bb2b-b5aa9d93f512\") " Nov 25 15:32:26 crc 
kubenswrapper[4800]: I1125 15:32:26.810832 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "81f88e63-467c-4356-bb2b-b5aa9d93f512" (UID: "81f88e63-467c-4356-bb2b-b5aa9d93f512"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.810962 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "81f88e63-467c-4356-bb2b-b5aa9d93f512" (UID: "81f88e63-467c-4356-bb2b-b5aa9d93f512"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.811071 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-service-ca" (OuterVolumeSpecName: "service-ca") pod "81f88e63-467c-4356-bb2b-b5aa9d93f512" (UID: "81f88e63-467c-4356-bb2b-b5aa9d93f512"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.811191 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-config" (OuterVolumeSpecName: "console-config") pod "81f88e63-467c-4356-bb2b-b5aa9d93f512" (UID: "81f88e63-467c-4356-bb2b-b5aa9d93f512"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.816293 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "81f88e63-467c-4356-bb2b-b5aa9d93f512" (UID: "81f88e63-467c-4356-bb2b-b5aa9d93f512"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.818158 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f88e63-467c-4356-bb2b-b5aa9d93f512-kube-api-access-9pxjv" (OuterVolumeSpecName: "kube-api-access-9pxjv") pod "81f88e63-467c-4356-bb2b-b5aa9d93f512" (UID: "81f88e63-467c-4356-bb2b-b5aa9d93f512"). InnerVolumeSpecName "kube-api-access-9pxjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.818763 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "81f88e63-467c-4356-bb2b-b5aa9d93f512" (UID: "81f88e63-467c-4356-bb2b-b5aa9d93f512"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.912333 4800 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.912379 4800 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.912392 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pxjv\" (UniqueName: \"kubernetes.io/projected/81f88e63-467c-4356-bb2b-b5aa9d93f512-kube-api-access-9pxjv\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.912404 4800 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.912413 4800 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/81f88e63-467c-4356-bb2b-b5aa9d93f512-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.912423 4800 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:26 crc kubenswrapper[4800]: I1125 15:32:26.912433 4800 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81f88e63-467c-4356-bb2b-b5aa9d93f512-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.331487 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-sx8kw_81f88e63-467c-4356-bb2b-b5aa9d93f512/console/0.log" Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.331873 4800 generic.go:334] "Generic (PLEG): container finished" podID="81f88e63-467c-4356-bb2b-b5aa9d93f512" containerID="c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527" exitCode=2 Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.331964 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sx8kw" event={"ID":"81f88e63-467c-4356-bb2b-b5aa9d93f512","Type":"ContainerDied","Data":"c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527"} Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.331983 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-sx8kw" Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.332026 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sx8kw" event={"ID":"81f88e63-467c-4356-bb2b-b5aa9d93f512","Type":"ContainerDied","Data":"a74f3e9b4c0f99ef2169b20aacbd15d2cf75b03e8f1ddc2c8f9bfb41b5a2dc49"} Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.332053 4800 scope.go:117] "RemoveContainer" containerID="c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527" Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.335724 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fwvl" event={"ID":"28578f18-346a-4afa-b73d-45b7faee6330","Type":"ContainerStarted","Data":"f39e2bb10f15eef506415bef1ef06b413b0523eab097d06fa28a2c407771e36e"} Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.358239 4800 scope.go:117] "RemoveContainer" containerID="c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527" Nov 25 15:32:27 crc kubenswrapper[4800]: E1125 15:32:27.358707 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527\": container with ID starting with c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527 not found: ID does not exist" containerID="c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527" Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.358731 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527"} err="failed to get container status \"c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527\": rpc error: code = NotFound desc = could not find container \"c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527\": container with ID starting with c048f3014a8bc72c70e0fe8b917dc280f79ce769888d89b7790b3e19f94da527 not found: ID does not exist" Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.359352 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2fwvl" podStartSLOduration=3.213671076 podStartE2EDuration="9.359334025s" podCreationTimestamp="2025-11-25 15:32:18 +0000 UTC" firstStartedPulling="2025-11-25 15:32:20.293157258 +0000 UTC m=+901.347565740" lastFinishedPulling="2025-11-25 15:32:26.438820207 +0000 UTC m=+907.493228689" observedRunningTime="2025-11-25 15:32:27.356413601 +0000 UTC m=+908.410822103" watchObservedRunningTime="2025-11-25 15:32:27.359334025 +0000 UTC m=+908.413742507" Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.376182 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-sx8kw"] Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.381676 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-sx8kw"] Nov 25 15:32:27 crc kubenswrapper[4800]: I1125 15:32:27.792087 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81f88e63-467c-4356-bb2b-b5aa9d93f512" path="/var/lib/kubelet/pods/81f88e63-467c-4356-bb2b-b5aa9d93f512/volumes" Nov 25 15:32:28 crc kubenswrapper[4800]: I1125 15:32:28.346342 4800 generic.go:334] "Generic (PLEG): container finished" podID="e59677c5-d37c-41e3-a083-2102f5e79f5d" 
containerID="b345094d8072d41f788b1447995959599b02a23057516bd014be2db6179471a8" exitCode=0 Nov 25 15:32:28 crc kubenswrapper[4800]: I1125 15:32:28.346464 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" event={"ID":"e59677c5-d37c-41e3-a083-2102f5e79f5d","Type":"ContainerDied","Data":"b345094d8072d41f788b1447995959599b02a23057516bd014be2db6179471a8"} Nov 25 15:32:28 crc kubenswrapper[4800]: I1125 15:32:28.851636 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2fwvl" Nov 25 15:32:28 crc kubenswrapper[4800]: I1125 15:32:28.852022 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2fwvl" Nov 25 15:32:28 crc kubenswrapper[4800]: I1125 15:32:28.902787 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2fwvl" Nov 25 15:32:29 crc kubenswrapper[4800]: I1125 15:32:29.354274 4800 generic.go:334] "Generic (PLEG): container finished" podID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerID="591052ee241522121d7aae97cafe03aad9886c4bdda6ed096795b302d1aa5c7e" exitCode=0 Nov 25 15:32:29 crc kubenswrapper[4800]: I1125 15:32:29.354361 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" event={"ID":"e59677c5-d37c-41e3-a083-2102f5e79f5d","Type":"ContainerDied","Data":"591052ee241522121d7aae97cafe03aad9886c4bdda6ed096795b302d1aa5c7e"} Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.568510 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.660985 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-bundle\") pod \"e59677c5-d37c-41e3-a083-2102f5e79f5d\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.661538 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-util\") pod \"e59677c5-d37c-41e3-a083-2102f5e79f5d\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.661761 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wnfr\" (UniqueName: \"kubernetes.io/projected/e59677c5-d37c-41e3-a083-2102f5e79f5d-kube-api-access-2wnfr\") pod \"e59677c5-d37c-41e3-a083-2102f5e79f5d\" (UID: \"e59677c5-d37c-41e3-a083-2102f5e79f5d\") " Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.662366 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-bundle" (OuterVolumeSpecName: "bundle") pod "e59677c5-d37c-41e3-a083-2102f5e79f5d" (UID: "e59677c5-d37c-41e3-a083-2102f5e79f5d"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.670136 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e59677c5-d37c-41e3-a083-2102f5e79f5d-kube-api-access-2wnfr" (OuterVolumeSpecName: "kube-api-access-2wnfr") pod "e59677c5-d37c-41e3-a083-2102f5e79f5d" (UID: "e59677c5-d37c-41e3-a083-2102f5e79f5d"). InnerVolumeSpecName "kube-api-access-2wnfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.763275 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wnfr\" (UniqueName: \"kubernetes.io/projected/e59677c5-d37c-41e3-a083-2102f5e79f5d-kube-api-access-2wnfr\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.763312 4800 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.899349 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-util" (OuterVolumeSpecName: "util") pod "e59677c5-d37c-41e3-a083-2102f5e79f5d" (UID: "e59677c5-d37c-41e3-a083-2102f5e79f5d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:32:30 crc kubenswrapper[4800]: I1125 15:32:30.965806 4800 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e59677c5-d37c-41e3-a083-2102f5e79f5d-util\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:31 crc kubenswrapper[4800]: I1125 15:32:31.367781 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" event={"ID":"e59677c5-d37c-41e3-a083-2102f5e79f5d","Type":"ContainerDied","Data":"ed9a447086e6591739ce327ac10cac24b42a23ee16e22ab708b02e801cbd1001"} Nov 25 15:32:31 crc kubenswrapper[4800]: I1125 15:32:31.367824 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt" Nov 25 15:32:31 crc kubenswrapper[4800]: I1125 15:32:31.367832 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed9a447086e6591739ce327ac10cac24b42a23ee16e22ab708b02e801cbd1001" Nov 25 15:32:31 crc kubenswrapper[4800]: E1125 15:32:31.468234 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode59677c5_d37c_41e3_a083_2102f5e79f5d.slice\": RecentStats: unable to find data in memory cache]" Nov 25 15:32:38 crc kubenswrapper[4800]: I1125 15:32:38.902997 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2fwvl" Nov 25 15:32:40 crc kubenswrapper[4800]: I1125 15:32:40.760142 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2fwvl"] Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.089672 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59bdq"] Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.090626 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-59bdq" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="registry-server" containerID="cri-o://f21138b5baf63126d28dbd05a20135ccebaf5e9759c4e272e6180c78c864c8eb" gracePeriod=2 Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.335350 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88"] Nov 25 15:32:41 crc kubenswrapper[4800]: E1125 15:32:41.335643 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81f88e63-467c-4356-bb2b-b5aa9d93f512" containerName="console" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.335662 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="81f88e63-467c-4356-bb2b-b5aa9d93f512" containerName="console" Nov 25 15:32:41 crc kubenswrapper[4800]: E1125 15:32:41.335679 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerName="pull" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.335686 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerName="pull" Nov 25 15:32:41 crc kubenswrapper[4800]: E1125 15:32:41.335707 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerName="util" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.335715 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerName="util" Nov 25 15:32:41 crc kubenswrapper[4800]: E1125 15:32:41.335724 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerName="extract" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.335731 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerName="extract" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.335883 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="81f88e63-467c-4356-bb2b-b5aa9d93f512" containerName="console" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.335905 4800 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e59677c5-d37c-41e3-a083-2102f5e79f5d" containerName="extract" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.336401 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.340076 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.340335 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-shv6h" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.340352 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.340882 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.341124 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.369117 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88"] Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.411356 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9r8h\" (UniqueName: \"kubernetes.io/projected/3b8101e4-3103-4602-ba9d-8a43d88566e6-kube-api-access-g9r8h\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.411455 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b8101e4-3103-4602-ba9d-8a43d88566e6-apiservice-cert\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.411606 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b8101e4-3103-4602-ba9d-8a43d88566e6-webhook-cert\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.458248 4800 generic.go:334] "Generic (PLEG): container finished" podID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerID="f21138b5baf63126d28dbd05a20135ccebaf5e9759c4e272e6180c78c864c8eb" exitCode=0 Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.458303 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59bdq" event={"ID":"ae35c255-4d64-4fdd-acd9-a796315307e4","Type":"ContainerDied","Data":"f21138b5baf63126d28dbd05a20135ccebaf5e9759c4e272e6180c78c864c8eb"} Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.512454 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b8101e4-3103-4602-ba9d-8a43d88566e6-webhook-cert\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.512763 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9r8h\" (UniqueName: \"kubernetes.io/projected/3b8101e4-3103-4602-ba9d-8a43d88566e6-kube-api-access-g9r8h\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.512800 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b8101e4-3103-4602-ba9d-8a43d88566e6-apiservice-cert\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.521870 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b8101e4-3103-4602-ba9d-8a43d88566e6-webhook-cert\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.522715 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b8101e4-3103-4602-ba9d-8a43d88566e6-apiservice-cert\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.534369 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9r8h\" (UniqueName: \"kubernetes.io/projected/3b8101e4-3103-4602-ba9d-8a43d88566e6-kube-api-access-g9r8h\") pod \"metallb-operator-controller-manager-667b8c5d74-psh88\" (UID: \"3b8101e4-3103-4602-ba9d-8a43d88566e6\") " pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.561343 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.619929 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xlw6\" (UniqueName: \"kubernetes.io/projected/ae35c255-4d64-4fdd-acd9-a796315307e4-kube-api-access-7xlw6\") pod \"ae35c255-4d64-4fdd-acd9-a796315307e4\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.620059 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-catalog-content\") pod \"ae35c255-4d64-4fdd-acd9-a796315307e4\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.620146 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-utilities\") pod \"ae35c255-4d64-4fdd-acd9-a796315307e4\" (UID: \"ae35c255-4d64-4fdd-acd9-a796315307e4\") " Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.622541 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-utilities" (OuterVolumeSpecName: "utilities") pod "ae35c255-4d64-4fdd-acd9-a796315307e4" (UID: "ae35c255-4d64-4fdd-acd9-a796315307e4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.644192 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae35c255-4d64-4fdd-acd9-a796315307e4-kube-api-access-7xlw6" (OuterVolumeSpecName: "kube-api-access-7xlw6") pod "ae35c255-4d64-4fdd-acd9-a796315307e4" (UID: "ae35c255-4d64-4fdd-acd9-a796315307e4"). InnerVolumeSpecName "kube-api-access-7xlw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.650801 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.686779 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae35c255-4d64-4fdd-acd9-a796315307e4" (UID: "ae35c255-4d64-4fdd-acd9-a796315307e4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.717548 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5"] Nov 25 15:32:41 crc kubenswrapper[4800]: E1125 15:32:41.722869 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="registry-server" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.722891 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="registry-server" Nov 25 15:32:41 crc kubenswrapper[4800]: E1125 15:32:41.722911 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="extract-content" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.722919 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="extract-content" Nov 25 15:32:41 crc kubenswrapper[4800]: E1125 15:32:41.722958 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="extract-utilities" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.722968 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="extract-utilities" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.723098 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" containerName="registry-server" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.723541 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.723692 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.723737 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae35c255-4d64-4fdd-acd9-a796315307e4-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.723749 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xlw6\" (UniqueName: \"kubernetes.io/projected/ae35c255-4d64-4fdd-acd9-a796315307e4-kube-api-access-7xlw6\") on node \"crc\" DevicePath \"\"" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.727606 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-wq4c6" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.727855 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.728400 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.747907 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5"] Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.824815 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8d07d578-289f-40fa-9e41-fc065151089c-webhook-cert\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.824931 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8d07d578-289f-40fa-9e41-fc065151089c-apiservice-cert\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.824960 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq8c8\" (UniqueName: \"kubernetes.io/projected/8d07d578-289f-40fa-9e41-fc065151089c-kube-api-access-fq8c8\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.925820 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq8c8\" (UniqueName: \"kubernetes.io/projected/8d07d578-289f-40fa-9e41-fc065151089c-kube-api-access-fq8c8\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.926307 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8d07d578-289f-40fa-9e41-fc065151089c-webhook-cert\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.926398 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8d07d578-289f-40fa-9e41-fc065151089c-apiservice-cert\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.932457 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8d07d578-289f-40fa-9e41-fc065151089c-apiservice-cert\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.941725 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8d07d578-289f-40fa-9e41-fc065151089c-webhook-cert\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:41 crc kubenswrapper[4800]: I1125 15:32:41.960766 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq8c8\" (UniqueName: 
\"kubernetes.io/projected/8d07d578-289f-40fa-9e41-fc065151089c-kube-api-access-fq8c8\") pod \"metallb-operator-webhook-server-7c85cdc97d-mz6j5\" (UID: \"8d07d578-289f-40fa-9e41-fc065151089c\") " pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.057362 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.259189 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88"] Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.368129 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5"] Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.465255 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59bdq" event={"ID":"ae35c255-4d64-4fdd-acd9-a796315307e4","Type":"ContainerDied","Data":"e929384519ba3ff1a9d0d6f1adbd99b2fbef6ffb3af903d55a3bf8746f17dc47"} Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.465298 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59bdq" Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.465313 4800 scope.go:117] "RemoveContainer" containerID="f21138b5baf63126d28dbd05a20135ccebaf5e9759c4e272e6180c78c864c8eb" Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.466799 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" event={"ID":"3b8101e4-3103-4602-ba9d-8a43d88566e6","Type":"ContainerStarted","Data":"a33a4342cd461b3d7bc0801a26d714c761c29b066989c10c703e2f6fed0312a8"} Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.468433 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" event={"ID":"8d07d578-289f-40fa-9e41-fc065151089c","Type":"ContainerStarted","Data":"758fd99057a7d1c9a2221d8264ae6e8410b7f028cf713948325524f9aa96e50b"} Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.482243 4800 scope.go:117] "RemoveContainer" containerID="ab3e3917b48a8777c81e38e118ec5a2499ddde6d2dbc8893681269b01750a795" Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.489808 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59bdq"] Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.494207 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-59bdq"] Nov 25 15:32:42 crc kubenswrapper[4800]: I1125 15:32:42.498893 4800 scope.go:117] "RemoveContainer" containerID="6b73391ebff03a1b71da971bbf828c149a906956fb2747d0d3ed4c43fb9b1a44" Nov 25 15:32:43 crc kubenswrapper[4800]: I1125 15:32:43.796192 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae35c255-4d64-4fdd-acd9-a796315307e4" path="/var/lib/kubelet/pods/ae35c255-4d64-4fdd-acd9-a796315307e4/volumes" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.514287 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" event={"ID":"3b8101e4-3103-4602-ba9d-8a43d88566e6","Type":"ContainerStarted","Data":"5aaa1b8d34281110ffbd7b8d2767d1b3de1599d53f9c0bbb4e4ff897c7a7f5d5"} Nov 25 15:32:48 
crc kubenswrapper[4800]: I1125 15:32:48.515246 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.517864 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" event={"ID":"8d07d578-289f-40fa-9e41-fc065151089c","Type":"ContainerStarted","Data":"38ef4fd7dc4973505162530aaa2ebe4f13e15b0cdd9a5d640496fd9b5e690681"} Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.518345 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.568509 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" podStartSLOduration=1.71774385 podStartE2EDuration="7.568479297s" podCreationTimestamp="2025-11-25 15:32:41 +0000 UTC" firstStartedPulling="2025-11-25 15:32:42.378744816 +0000 UTC m=+923.433153298" lastFinishedPulling="2025-11-25 15:32:48.229480253 +0000 UTC m=+929.283888745" observedRunningTime="2025-11-25 15:32:48.563178273 +0000 UTC m=+929.617586755" watchObservedRunningTime="2025-11-25 15:32:48.568479297 +0000 UTC m=+929.622887799" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.569095 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" podStartSLOduration=1.647255125 podStartE2EDuration="7.569085733s" podCreationTimestamp="2025-11-25 15:32:41 +0000 UTC" firstStartedPulling="2025-11-25 15:32:42.291500107 +0000 UTC m=+923.345908589" lastFinishedPulling="2025-11-25 15:32:48.213330715 +0000 UTC m=+929.267739197" observedRunningTime="2025-11-25 15:32:48.540058187 +0000 UTC m=+929.594466669" watchObservedRunningTime="2025-11-25 15:32:48.569085733 +0000 UTC m=+929.623494215" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.714770 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dnwn4"] Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.715835 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.735905 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dnwn4"] Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.820880 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-catalog-content\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.820935 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-utilities\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.821037 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7vs7\" (UniqueName: \"kubernetes.io/projected/8c957503-1457-4a7b-a107-ed2135746cec-kube-api-access-n7vs7\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.922657 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7vs7\" (UniqueName: \"kubernetes.io/projected/8c957503-1457-4a7b-a107-ed2135746cec-kube-api-access-n7vs7\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.922752 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-catalog-content\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.922779 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-utilities\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.923290 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-utilities\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.923535 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-catalog-content\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:48 crc kubenswrapper[4800]: I1125 15:32:48.954220 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-n7vs7\" (UniqueName: \"kubernetes.io/projected/8c957503-1457-4a7b-a107-ed2135746cec-kube-api-access-n7vs7\") pod \"certified-operators-dnwn4\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:49 crc kubenswrapper[4800]: I1125 15:32:49.034270 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:49 crc kubenswrapper[4800]: I1125 15:32:49.347752 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dnwn4"] Nov 25 15:32:49 crc kubenswrapper[4800]: I1125 15:32:49.527262 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnwn4" event={"ID":"8c957503-1457-4a7b-a107-ed2135746cec","Type":"ContainerStarted","Data":"33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5"} Nov 25 15:32:49 crc kubenswrapper[4800]: I1125 15:32:49.527319 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnwn4" event={"ID":"8c957503-1457-4a7b-a107-ed2135746cec","Type":"ContainerStarted","Data":"170aa653964f9fef191db2098f765ea251f9de4b7659676c36f0ff1c9b1ea661"} Nov 25 15:32:50 crc kubenswrapper[4800]: I1125 15:32:50.533758 4800 generic.go:334] "Generic (PLEG): container finished" podID="8c957503-1457-4a7b-a107-ed2135746cec" containerID="33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5" exitCode=0 Nov 25 15:32:50 crc kubenswrapper[4800]: I1125 15:32:50.533827 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnwn4" event={"ID":"8c957503-1457-4a7b-a107-ed2135746cec","Type":"ContainerDied","Data":"33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5"} Nov 25 15:32:53 crc kubenswrapper[4800]: I1125 15:32:53.553403 4800 generic.go:334] "Generic (PLEG): container finished" podID="8c957503-1457-4a7b-a107-ed2135746cec" containerID="bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04" exitCode=0 Nov 25 15:32:53 crc kubenswrapper[4800]: I1125 15:32:53.553526 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnwn4" event={"ID":"8c957503-1457-4a7b-a107-ed2135746cec","Type":"ContainerDied","Data":"bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04"} Nov 25 15:32:54 crc kubenswrapper[4800]: I1125 15:32:54.560795 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnwn4" event={"ID":"8c957503-1457-4a7b-a107-ed2135746cec","Type":"ContainerStarted","Data":"2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2"} Nov 25 15:32:54 crc kubenswrapper[4800]: I1125 15:32:54.585583 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dnwn4" podStartSLOduration=2.856789399 podStartE2EDuration="6.585566777s" podCreationTimestamp="2025-11-25 15:32:48 +0000 UTC" firstStartedPulling="2025-11-25 15:32:50.535593197 +0000 UTC m=+931.590001679" lastFinishedPulling="2025-11-25 15:32:54.264370575 +0000 UTC m=+935.318779057" observedRunningTime="2025-11-25 15:32:54.581427733 +0000 UTC m=+935.635836215" watchObservedRunningTime="2025-11-25 15:32:54.585566777 +0000 UTC m=+935.639975259" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.035037 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.036051 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.101043 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.522782 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xspgj"] Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.528499 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.533889 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xspgj"] Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.587526 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdt8x\" (UniqueName: \"kubernetes.io/projected/4d1a5a68-892b-4837-8556-d52fff52c662-kube-api-access-cdt8x\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.587940 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-utilities\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.588034 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-catalog-content\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.650751 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.689622 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdt8x\" (UniqueName: \"kubernetes.io/projected/4d1a5a68-892b-4837-8556-d52fff52c662-kube-api-access-cdt8x\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.689758 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-utilities\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.689799 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-catalog-content\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " 
pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.694762 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-catalog-content\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.695593 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-utilities\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.717321 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdt8x\" (UniqueName: \"kubernetes.io/projected/4d1a5a68-892b-4837-8556-d52fff52c662-kube-api-access-cdt8x\") pod \"redhat-marketplace-xspgj\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:32:59 crc kubenswrapper[4800]: I1125 15:32:59.857375 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:33:00 crc kubenswrapper[4800]: I1125 15:33:00.181648 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xspgj"] Nov 25 15:33:00 crc kubenswrapper[4800]: W1125 15:33:00.189244 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d1a5a68_892b_4837_8556_d52fff52c662.slice/crio-30ad37cf6789da6b590e33ecd1c5de47fd84be75413d57232f65bc6c009de36d WatchSource:0}: Error finding container 30ad37cf6789da6b590e33ecd1c5de47fd84be75413d57232f65bc6c009de36d: Status 404 returned error can't find the container with id 30ad37cf6789da6b590e33ecd1c5de47fd84be75413d57232f65bc6c009de36d Nov 25 15:33:00 crc kubenswrapper[4800]: I1125 15:33:00.604900 4800 generic.go:334] "Generic (PLEG): container finished" podID="4d1a5a68-892b-4837-8556-d52fff52c662" containerID="28552f5492a0731c10d51222fce39d8da18f73bf0c7a537cf9e6691a67af1712" exitCode=0 Nov 25 15:33:00 crc kubenswrapper[4800]: I1125 15:33:00.605009 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xspgj" event={"ID":"4d1a5a68-892b-4837-8556-d52fff52c662","Type":"ContainerDied","Data":"28552f5492a0731c10d51222fce39d8da18f73bf0c7a537cf9e6691a67af1712"} Nov 25 15:33:00 crc kubenswrapper[4800]: I1125 15:33:00.605084 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xspgj" event={"ID":"4d1a5a68-892b-4837-8556-d52fff52c662","Type":"ContainerStarted","Data":"30ad37cf6789da6b590e33ecd1c5de47fd84be75413d57232f65bc6c009de36d"} Nov 25 15:33:01 crc kubenswrapper[4800]: I1125 15:33:01.882277 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dnwn4"] Nov 25 15:33:01 crc kubenswrapper[4800]: I1125 15:33:01.884130 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dnwn4" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="registry-server" containerID="cri-o://2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2" 
gracePeriod=2 Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.065303 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7c85cdc97d-mz6j5" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.296195 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.340356 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-utilities\") pod \"8c957503-1457-4a7b-a107-ed2135746cec\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.340552 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-catalog-content\") pod \"8c957503-1457-4a7b-a107-ed2135746cec\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.340730 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7vs7\" (UniqueName: \"kubernetes.io/projected/8c957503-1457-4a7b-a107-ed2135746cec-kube-api-access-n7vs7\") pod \"8c957503-1457-4a7b-a107-ed2135746cec\" (UID: \"8c957503-1457-4a7b-a107-ed2135746cec\") " Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.341729 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-utilities" (OuterVolumeSpecName: "utilities") pod "8c957503-1457-4a7b-a107-ed2135746cec" (UID: "8c957503-1457-4a7b-a107-ed2135746cec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.342441 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.348289 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c957503-1457-4a7b-a107-ed2135746cec-kube-api-access-n7vs7" (OuterVolumeSpecName: "kube-api-access-n7vs7") pod "8c957503-1457-4a7b-a107-ed2135746cec" (UID: "8c957503-1457-4a7b-a107-ed2135746cec"). InnerVolumeSpecName "kube-api-access-n7vs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.404532 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8c957503-1457-4a7b-a107-ed2135746cec" (UID: "8c957503-1457-4a7b-a107-ed2135746cec"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.444695 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7vs7\" (UniqueName: \"kubernetes.io/projected/8c957503-1457-4a7b-a107-ed2135746cec-kube-api-access-n7vs7\") on node \"crc\" DevicePath \"\"" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.444740 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c957503-1457-4a7b-a107-ed2135746cec-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.620185 4800 generic.go:334] "Generic (PLEG): container finished" podID="4d1a5a68-892b-4837-8556-d52fff52c662" containerID="66c4d38036383feee31d199dd57619b2f11d05fcd64d4c6d443d4267201b5c7f" exitCode=0 Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.620233 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xspgj" event={"ID":"4d1a5a68-892b-4837-8556-d52fff52c662","Type":"ContainerDied","Data":"66c4d38036383feee31d199dd57619b2f11d05fcd64d4c6d443d4267201b5c7f"} Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.624247 4800 generic.go:334] "Generic (PLEG): container finished" podID="8c957503-1457-4a7b-a107-ed2135746cec" containerID="2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2" exitCode=0 Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.624313 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnwn4" event={"ID":"8c957503-1457-4a7b-a107-ed2135746cec","Type":"ContainerDied","Data":"2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2"} Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.624353 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnwn4" event={"ID":"8c957503-1457-4a7b-a107-ed2135746cec","Type":"ContainerDied","Data":"170aa653964f9fef191db2098f765ea251f9de4b7659676c36f0ff1c9b1ea661"} Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.624378 4800 scope.go:117] "RemoveContainer" containerID="2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.624516 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dnwn4" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.647462 4800 scope.go:117] "RemoveContainer" containerID="bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.667585 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dnwn4"] Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.674312 4800 scope.go:117] "RemoveContainer" containerID="33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.675269 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dnwn4"] Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.691589 4800 scope.go:117] "RemoveContainer" containerID="2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2" Nov 25 15:33:02 crc kubenswrapper[4800]: E1125 15:33:02.692520 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2\": container with ID starting with 2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2 not found: ID does not exist" containerID="2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.692558 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2"} err="failed to get container status \"2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2\": rpc error: code = NotFound desc = could not find container \"2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2\": container with ID starting with 2815065a866ce8e5d0eb8acf49f710378ecfe4b134d9ba5bb67b09cf882a9ea2 not found: ID does not exist" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.692581 4800 scope.go:117] "RemoveContainer" containerID="bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04" Nov 25 15:33:02 crc kubenswrapper[4800]: E1125 15:33:02.693154 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04\": container with ID starting with bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04 not found: ID does not exist" containerID="bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.693213 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04"} err="failed to get container status \"bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04\": rpc error: code = NotFound desc = could not find container \"bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04\": container with ID starting with bdc8607027b2bb06fe1d8f99aefb0cad840c65ec0896e5f425ddd639a4382f04 not found: ID does not exist" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.693250 4800 scope.go:117] "RemoveContainer" containerID="33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5" Nov 25 15:33:02 crc kubenswrapper[4800]: E1125 15:33:02.693744 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5\": container with ID starting with 33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5 not found: ID does not exist" containerID="33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5" Nov 25 15:33:02 crc kubenswrapper[4800]: I1125 15:33:02.693775 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5"} err="failed to get container status \"33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5\": rpc error: code = NotFound desc = could not find container \"33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5\": container with ID starting with 33a8686d737718e5a8de47cb319bdaf3a905db824c5d2bee330496e292c156e5 not found: ID does not exist" Nov 25 15:33:03 crc kubenswrapper[4800]: I1125 15:33:03.639572 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xspgj" event={"ID":"4d1a5a68-892b-4837-8556-d52fff52c662","Type":"ContainerStarted","Data":"227d5b653346a255f8ac991b3f774c99464182c5c0b5d11eca59d174b2064c20"} Nov 25 15:33:03 crc kubenswrapper[4800]: I1125 15:33:03.662176 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xspgj" podStartSLOduration=2.136597049 podStartE2EDuration="4.662146458s" podCreationTimestamp="2025-11-25 15:32:59 +0000 UTC" firstStartedPulling="2025-11-25 15:33:00.606580578 +0000 UTC m=+941.660989060" lastFinishedPulling="2025-11-25 15:33:03.132129987 +0000 UTC m=+944.186538469" observedRunningTime="2025-11-25 15:33:03.659133322 +0000 UTC m=+944.713541804" watchObservedRunningTime="2025-11-25 15:33:03.662146458 +0000 UTC m=+944.716554940" Nov 25 15:33:03 crc kubenswrapper[4800]: I1125 15:33:03.795476 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c957503-1457-4a7b-a107-ed2135746cec" path="/var/lib/kubelet/pods/8c957503-1457-4a7b-a107-ed2135746cec/volumes" Nov 25 15:33:09 crc kubenswrapper[4800]: I1125 15:33:09.858061 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:33:09 crc kubenswrapper[4800]: I1125 15:33:09.859030 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:33:09 crc kubenswrapper[4800]: I1125 15:33:09.921773 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:33:10 crc kubenswrapper[4800]: I1125 15:33:10.720351 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:33:10 crc kubenswrapper[4800]: I1125 15:33:10.769972 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xspgj"] Nov 25 15:33:12 crc kubenswrapper[4800]: I1125 15:33:12.687054 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xspgj" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="registry-server" containerID="cri-o://227d5b653346a255f8ac991b3f774c99464182c5c0b5d11eca59d174b2064c20" gracePeriod=2 Nov 25 15:33:13 crc kubenswrapper[4800]: I1125 15:33:13.695139 4800 generic.go:334] "Generic (PLEG): container 
finished" podID="4d1a5a68-892b-4837-8556-d52fff52c662" containerID="227d5b653346a255f8ac991b3f774c99464182c5c0b5d11eca59d174b2064c20" exitCode=0 Nov 25 15:33:13 crc kubenswrapper[4800]: I1125 15:33:13.695215 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xspgj" event={"ID":"4d1a5a68-892b-4837-8556-d52fff52c662","Type":"ContainerDied","Data":"227d5b653346a255f8ac991b3f774c99464182c5c0b5d11eca59d174b2064c20"} Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.327316 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.472675 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-catalog-content\") pod \"4d1a5a68-892b-4837-8556-d52fff52c662\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.472733 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdt8x\" (UniqueName: \"kubernetes.io/projected/4d1a5a68-892b-4837-8556-d52fff52c662-kube-api-access-cdt8x\") pod \"4d1a5a68-892b-4837-8556-d52fff52c662\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.472822 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-utilities\") pod \"4d1a5a68-892b-4837-8556-d52fff52c662\" (UID: \"4d1a5a68-892b-4837-8556-d52fff52c662\") " Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.474021 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-utilities" (OuterVolumeSpecName: "utilities") pod "4d1a5a68-892b-4837-8556-d52fff52c662" (UID: "4d1a5a68-892b-4837-8556-d52fff52c662"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.480689 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d1a5a68-892b-4837-8556-d52fff52c662-kube-api-access-cdt8x" (OuterVolumeSpecName: "kube-api-access-cdt8x") pod "4d1a5a68-892b-4837-8556-d52fff52c662" (UID: "4d1a5a68-892b-4837-8556-d52fff52c662"). InnerVolumeSpecName "kube-api-access-cdt8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.492388 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4d1a5a68-892b-4837-8556-d52fff52c662" (UID: "4d1a5a68-892b-4837-8556-d52fff52c662"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.574011 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdt8x\" (UniqueName: \"kubernetes.io/projected/4d1a5a68-892b-4837-8556-d52fff52c662-kube-api-access-cdt8x\") on node \"crc\" DevicePath \"\"" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.574060 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.574077 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d1a5a68-892b-4837-8556-d52fff52c662-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.703617 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xspgj" event={"ID":"4d1a5a68-892b-4837-8556-d52fff52c662","Type":"ContainerDied","Data":"30ad37cf6789da6b590e33ecd1c5de47fd84be75413d57232f65bc6c009de36d"} Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.703673 4800 scope.go:117] "RemoveContainer" containerID="227d5b653346a255f8ac991b3f774c99464182c5c0b5d11eca59d174b2064c20" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.703742 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xspgj" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.720427 4800 scope.go:117] "RemoveContainer" containerID="66c4d38036383feee31d199dd57619b2f11d05fcd64d4c6d443d4267201b5c7f" Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.735097 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xspgj"] Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.745352 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xspgj"] Nov 25 15:33:14 crc kubenswrapper[4800]: I1125 15:33:14.760572 4800 scope.go:117] "RemoveContainer" containerID="28552f5492a0731c10d51222fce39d8da18f73bf0c7a537cf9e6691a67af1712" Nov 25 15:33:15 crc kubenswrapper[4800]: I1125 15:33:15.792319 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" path="/var/lib/kubelet/pods/4d1a5a68-892b-4837-8556-d52fff52c662/volumes" Nov 25 15:33:21 crc kubenswrapper[4800]: I1125 15:33:21.654756 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-667b8c5d74-psh88" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.417695 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-pjc8l"] Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.418601 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="extract-utilities" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418621 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="extract-utilities" Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.418643 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="registry-server" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418652 4800 
state_mem.go:107] "Deleted CPUSet assignment" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="registry-server" Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.418673 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="extract-content" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418684 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="extract-content" Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.418693 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="extract-content" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418700 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="extract-content" Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.418711 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="registry-server" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418718 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="registry-server" Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.418729 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="extract-utilities" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418736 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="extract-utilities" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418921 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c957503-1457-4a7b-a107-ed2135746cec" containerName="registry-server" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.418947 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d1a5a68-892b-4837-8556-d52fff52c662" containerName="registry-server" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.428363 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.430129 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2"] Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.431388 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.436472 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.436812 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.436480 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-lnsml" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.437139 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.443329 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2"] Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.553734 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-r7gdx"] Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.555408 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.560788 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.561655 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.561686 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-x4tq4" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.564271 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.576093 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-snhhr"] Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.577019 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.583427 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.606747 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-snhhr"] Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.612185 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-metrics-certs\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.612482 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05323c34-8333-474b-9713-a1b20ea27b72-cert\") pod \"frr-k8s-webhook-server-6998585d5-p7rt2\" (UID: \"05323c34-8333-474b-9713-a1b20ea27b72\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.612605 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9bkd\" (UniqueName: \"kubernetes.io/projected/05323c34-8333-474b-9713-a1b20ea27b72-kube-api-access-s9bkd\") pod \"frr-k8s-webhook-server-6998585d5-p7rt2\" (UID: \"05323c34-8333-474b-9713-a1b20ea27b72\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.612731 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-reloader\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.612866 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2d98\" (UniqueName: \"kubernetes.io/projected/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-kube-api-access-f2d98\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.613030 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-conf\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.613085 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-startup\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.613151 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-sockets\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 
15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.613166 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-metrics\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.714944 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716094 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-reloader\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716224 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-metrics-certs\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716342 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2d98\" (UniqueName: \"kubernetes.io/projected/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-kube-api-access-f2d98\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716435 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-metrics-certs\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716530 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-conf\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716625 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jlrd\" (UniqueName: \"kubernetes.io/projected/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-kube-api-access-7jlrd\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716690 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-reloader\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716706 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: 
\"kubernetes.io/configmap/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-startup\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.716917 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx6gr\" (UniqueName: \"kubernetes.io/projected/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-kube-api-access-qx6gr\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717027 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-sockets\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717051 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-conf\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717118 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-metrics\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717244 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-metallb-excludel2\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717326 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-metrics-certs\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717364 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05323c34-8333-474b-9713-a1b20ea27b72-cert\") pod \"frr-k8s-webhook-server-6998585d5-p7rt2\" (UID: \"05323c34-8333-474b-9713-a1b20ea27b72\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717362 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-sockets\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717453 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9bkd\" (UniqueName: \"kubernetes.io/projected/05323c34-8333-474b-9713-a1b20ea27b72-kube-api-access-s9bkd\") pod \"frr-k8s-webhook-server-6998585d5-p7rt2\" (UID: \"05323c34-8333-474b-9713-a1b20ea27b72\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 
25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717512 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-cert\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.717889 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-frr-startup\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.718394 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-metrics\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.724296 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-metrics-certs\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.736255 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05323c34-8333-474b-9713-a1b20ea27b72-cert\") pod \"frr-k8s-webhook-server-6998585d5-p7rt2\" (UID: \"05323c34-8333-474b-9713-a1b20ea27b72\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.738213 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2d98\" (UniqueName: \"kubernetes.io/projected/f37ddcfd-23c7-4052-8a17-4ea5fe5de78e-kube-api-access-f2d98\") pod \"frr-k8s-pjc8l\" (UID: \"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e\") " pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.738254 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9bkd\" (UniqueName: \"kubernetes.io/projected/05323c34-8333-474b-9713-a1b20ea27b72-kube-api-access-s9bkd\") pod \"frr-k8s-webhook-server-6998585d5-p7rt2\" (UID: \"05323c34-8333-474b-9713-a1b20ea27b72\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.764791 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.775464 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.819234 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-metrics-certs\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.819296 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jlrd\" (UniqueName: \"kubernetes.io/projected/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-kube-api-access-7jlrd\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.819323 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx6gr\" (UniqueName: \"kubernetes.io/projected/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-kube-api-access-qx6gr\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.819358 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-metallb-excludel2\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.819389 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-cert\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.819407 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.819433 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-metrics-certs\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.819745 4800 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 15:33:22 crc kubenswrapper[4800]: E1125 15:33:22.819815 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist podName:74dd80b5-113a-476d-8f3d-dd49dfb10e8e nodeName:}" failed. No retries permitted until 2025-11-25 15:33:23.319795207 +0000 UTC m=+964.374203689 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist") pod "speaker-r7gdx" (UID: "74dd80b5-113a-476d-8f3d-dd49dfb10e8e") : secret "metallb-memberlist" not found Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.820360 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-metallb-excludel2\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.826394 4800 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.826494 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-metrics-certs\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.826753 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-metrics-certs\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.838808 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jlrd\" (UniqueName: \"kubernetes.io/projected/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-kube-api-access-7jlrd\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.839143 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx6gr\" (UniqueName: \"kubernetes.io/projected/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-kube-api-access-qx6gr\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.839596 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8-cert\") pod \"controller-6c7b4b5f48-snhhr\" (UID: \"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8\") " pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:22 crc kubenswrapper[4800]: I1125 15:33:22.896000 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.141684 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-snhhr"] Nov 25 15:33:23 crc kubenswrapper[4800]: W1125 15:33:23.145808 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9e4cf5b_2e7f_448e_8e0a_6bd74b29b0d8.slice/crio-f1b95f2e8e7ab5cbd9f6e84d253c03844f955a2cb448aa972c5716e487d50fa8 WatchSource:0}: Error finding container f1b95f2e8e7ab5cbd9f6e84d253c03844f955a2cb448aa972c5716e487d50fa8: Status 404 returned error can't find the container with id f1b95f2e8e7ab5cbd9f6e84d253c03844f955a2cb448aa972c5716e487d50fa8 Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.268325 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2"] Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.332536 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:23 crc kubenswrapper[4800]: E1125 15:33:23.332743 4800 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 15:33:23 crc kubenswrapper[4800]: E1125 15:33:23.332859 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist podName:74dd80b5-113a-476d-8f3d-dd49dfb10e8e nodeName:}" failed. No retries permitted until 2025-11-25 15:33:24.332815327 +0000 UTC m=+965.387223809 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist") pod "speaker-r7gdx" (UID: "74dd80b5-113a-476d-8f3d-dd49dfb10e8e") : secret "metallb-memberlist" not found Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.765724 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-snhhr" event={"ID":"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8","Type":"ContainerStarted","Data":"3579c5324d29079e1e5bd5199ea5469f3883c23c118c5630a517c8a49cb34869"} Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.765794 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-snhhr" event={"ID":"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8","Type":"ContainerStarted","Data":"0d82ccbccc9ea7a2a2aa839f9635ca8b9a349f3c3e2635c7d40d0ec771cbabdd"} Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.765814 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.765827 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-snhhr" event={"ID":"a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8","Type":"ContainerStarted","Data":"f1b95f2e8e7ab5cbd9f6e84d253c03844f955a2cb448aa972c5716e487d50fa8"} Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.766799 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" event={"ID":"05323c34-8333-474b-9713-a1b20ea27b72","Type":"ContainerStarted","Data":"ac9b106a111af742ddf08f1174451a2399fa7cdecff23f66e9676cee30595b80"} Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.767824 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerStarted","Data":"5604bdfcab741abc4d1f09e091c3cb0fb85030eadf18a9078deb754755b37a31"} Nov 25 15:33:23 crc kubenswrapper[4800]: I1125 15:33:23.790656 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-snhhr" podStartSLOduration=1.79062914 podStartE2EDuration="1.79062914s" podCreationTimestamp="2025-11-25 15:33:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:33:23.784129585 +0000 UTC m=+964.838538087" watchObservedRunningTime="2025-11-25 15:33:23.79062914 +0000 UTC m=+964.845037622" Nov 25 15:33:24 crc kubenswrapper[4800]: I1125 15:33:24.350852 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:24 crc kubenswrapper[4800]: I1125 15:33:24.371619 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/74dd80b5-113a-476d-8f3d-dd49dfb10e8e-memberlist\") pod \"speaker-r7gdx\" (UID: \"74dd80b5-113a-476d-8f3d-dd49dfb10e8e\") " pod="metallb-system/speaker-r7gdx" Nov 25 15:33:24 crc kubenswrapper[4800]: I1125 15:33:24.671551 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-r7gdx" Nov 25 15:33:24 crc kubenswrapper[4800]: I1125 15:33:24.776316 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-r7gdx" event={"ID":"74dd80b5-113a-476d-8f3d-dd49dfb10e8e","Type":"ContainerStarted","Data":"4de7dd091f3860dea3d18049132afb7e81e3fb93ff940dbc310c023e9731662c"} Nov 25 15:33:25 crc kubenswrapper[4800]: I1125 15:33:25.795173 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-r7gdx" event={"ID":"74dd80b5-113a-476d-8f3d-dd49dfb10e8e","Type":"ContainerStarted","Data":"7b266727725cf5f4751b92a497e9bef31ab919fad05954658b02090298263f32"} Nov 25 15:33:25 crc kubenswrapper[4800]: I1125 15:33:25.795698 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-r7gdx" Nov 25 15:33:25 crc kubenswrapper[4800]: I1125 15:33:25.795715 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-r7gdx" event={"ID":"74dd80b5-113a-476d-8f3d-dd49dfb10e8e","Type":"ContainerStarted","Data":"c0399f819a02a4096a7efb7d38ee3658255b7a17131e17a07a5ca8cb9819d027"} Nov 25 15:33:25 crc kubenswrapper[4800]: I1125 15:33:25.815669 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-r7gdx" podStartSLOduration=3.815643215 podStartE2EDuration="3.815643215s" podCreationTimestamp="2025-11-25 15:33:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:33:25.812950817 +0000 UTC m=+966.867359299" watchObservedRunningTime="2025-11-25 15:33:25.815643215 +0000 UTC m=+966.870051697" Nov 25 15:33:31 crc kubenswrapper[4800]: I1125 15:33:31.826792 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" event={"ID":"05323c34-8333-474b-9713-a1b20ea27b72","Type":"ContainerStarted","Data":"627d61450185c16704a7d096c6d7a68b83ce3004ae4456a37ca0b5fb13cbb7d6"} Nov 25 15:33:31 crc kubenswrapper[4800]: I1125 15:33:31.827633 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:31 crc kubenswrapper[4800]: I1125 15:33:31.828990 4800 generic.go:334] "Generic (PLEG): container finished" podID="f37ddcfd-23c7-4052-8a17-4ea5fe5de78e" containerID="7f31894a848ff5378cb3c5fb8c52327be876f59623ad85a1664bdda372d87131" exitCode=0 Nov 25 15:33:31 crc kubenswrapper[4800]: I1125 15:33:31.829028 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerDied","Data":"7f31894a848ff5378cb3c5fb8c52327be876f59623ad85a1664bdda372d87131"} Nov 25 15:33:31 crc kubenswrapper[4800]: I1125 15:33:31.854187 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" podStartSLOduration=2.397588584 podStartE2EDuration="9.854163812s" podCreationTimestamp="2025-11-25 15:33:22 +0000 UTC" firstStartedPulling="2025-11-25 15:33:23.27644464 +0000 UTC m=+964.330853132" lastFinishedPulling="2025-11-25 15:33:30.733019888 +0000 UTC m=+971.787428360" observedRunningTime="2025-11-25 15:33:31.848654165 +0000 UTC m=+972.903062697" watchObservedRunningTime="2025-11-25 15:33:31.854163812 +0000 UTC m=+972.908572294" Nov 25 15:33:32 crc kubenswrapper[4800]: I1125 15:33:32.838439 4800 generic.go:334] "Generic (PLEG): container finished" 
podID="f37ddcfd-23c7-4052-8a17-4ea5fe5de78e" containerID="270c1b3c31903dd367de2f23b80c8812badf73b6fdc2a7a609d156397a8c850b" exitCode=0 Nov 25 15:33:32 crc kubenswrapper[4800]: I1125 15:33:32.839528 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerDied","Data":"270c1b3c31903dd367de2f23b80c8812badf73b6fdc2a7a609d156397a8c850b"} Nov 25 15:33:33 crc kubenswrapper[4800]: I1125 15:33:33.848232 4800 generic.go:334] "Generic (PLEG): container finished" podID="f37ddcfd-23c7-4052-8a17-4ea5fe5de78e" containerID="a6a7bcc9f0ee5c6be4f4fdbaac10bcef450834055556660a3ac9b2f09ba12624" exitCode=0 Nov 25 15:33:33 crc kubenswrapper[4800]: I1125 15:33:33.848339 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerDied","Data":"a6a7bcc9f0ee5c6be4f4fdbaac10bcef450834055556660a3ac9b2f09ba12624"} Nov 25 15:33:34 crc kubenswrapper[4800]: I1125 15:33:34.859762 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerStarted","Data":"e8fef67264472208183ec0dc52b48ef3ea0ca79fe381419697513cd85332b354"} Nov 25 15:33:34 crc kubenswrapper[4800]: I1125 15:33:34.860207 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerStarted","Data":"4e595fe177fb2f637e11d25a16f06899894dd90e32ef77b5da7c8ab53a11c672"} Nov 25 15:33:34 crc kubenswrapper[4800]: I1125 15:33:34.860226 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerStarted","Data":"f21de79e51937c8fea809eb71f0d1445ac52a79b5695ac5ae1d80ab5349cb187"} Nov 25 15:33:34 crc kubenswrapper[4800]: I1125 15:33:34.860239 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerStarted","Data":"751ad428e0e10961ed5427b788238f06c4283632c98878e0d8247d0150e2544e"} Nov 25 15:33:34 crc kubenswrapper[4800]: I1125 15:33:34.860250 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerStarted","Data":"72bb5fea1dfedc5f125e03f508340073b324c2b00352325c25e06d06512cc036"} Nov 25 15:33:35 crc kubenswrapper[4800]: I1125 15:33:35.878137 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pjc8l" event={"ID":"f37ddcfd-23c7-4052-8a17-4ea5fe5de78e","Type":"ContainerStarted","Data":"3ef05340cba7298cfafc03b568db0ab4523b5e1e5575faf4dad7ef16e4637c77"} Nov 25 15:33:35 crc kubenswrapper[4800]: I1125 15:33:35.879949 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:35 crc kubenswrapper[4800]: I1125 15:33:35.905284 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-pjc8l" podStartSLOduration=6.179266388 podStartE2EDuration="13.905268458s" podCreationTimestamp="2025-11-25 15:33:22 +0000 UTC" firstStartedPulling="2025-11-25 15:33:22.989033352 +0000 UTC m=+964.043441834" lastFinishedPulling="2025-11-25 15:33:30.715035422 +0000 UTC m=+971.769443904" observedRunningTime="2025-11-25 15:33:35.902089429 +0000 UTC m=+976.956497911" 
watchObservedRunningTime="2025-11-25 15:33:35.905268458 +0000 UTC m=+976.959676940" Nov 25 15:33:37 crc kubenswrapper[4800]: I1125 15:33:37.766121 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:37 crc kubenswrapper[4800]: I1125 15:33:37.807005 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:42 crc kubenswrapper[4800]: I1125 15:33:42.640010 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:33:42 crc kubenswrapper[4800]: I1125 15:33:42.640485 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:33:42 crc kubenswrapper[4800]: I1125 15:33:42.780783 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" Nov 25 15:33:42 crc kubenswrapper[4800]: I1125 15:33:42.902538 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-snhhr" Nov 25 15:33:44 crc kubenswrapper[4800]: I1125 15:33:44.678187 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-r7gdx" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.527216 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-vhzmw"] Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.528743 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vhzmw" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.537401 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.537537 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.537655 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-ndvrl" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.542811 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vhzmw"] Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.598544 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mdv4\" (UniqueName: \"kubernetes.io/projected/d31faa16-596b-4977-a186-437bb94c7122-kube-api-access-7mdv4\") pod \"openstack-operator-index-vhzmw\" (UID: \"d31faa16-596b-4977-a186-437bb94c7122\") " pod="openstack-operators/openstack-operator-index-vhzmw" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.700164 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mdv4\" (UniqueName: \"kubernetes.io/projected/d31faa16-596b-4977-a186-437bb94c7122-kube-api-access-7mdv4\") pod \"openstack-operator-index-vhzmw\" (UID: \"d31faa16-596b-4977-a186-437bb94c7122\") " pod="openstack-operators/openstack-operator-index-vhzmw" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.723611 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mdv4\" (UniqueName: \"kubernetes.io/projected/d31faa16-596b-4977-a186-437bb94c7122-kube-api-access-7mdv4\") pod \"openstack-operator-index-vhzmw\" (UID: \"d31faa16-596b-4977-a186-437bb94c7122\") " pod="openstack-operators/openstack-operator-index-vhzmw" Nov 25 15:33:47 crc kubenswrapper[4800]: I1125 15:33:47.857202 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vhzmw" Nov 25 15:33:48 crc kubenswrapper[4800]: I1125 15:33:48.297186 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vhzmw"] Nov 25 15:33:48 crc kubenswrapper[4800]: I1125 15:33:48.964982 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vhzmw" event={"ID":"d31faa16-596b-4977-a186-437bb94c7122","Type":"ContainerStarted","Data":"458b03b69d978e0efc347f5bf3bf0f30aea10c6ce95e947e651f8319873d8e37"} Nov 25 15:33:49 crc kubenswrapper[4800]: I1125 15:33:49.975854 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vhzmw" event={"ID":"d31faa16-596b-4977-a186-437bb94c7122","Type":"ContainerStarted","Data":"09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2"} Nov 25 15:33:49 crc kubenswrapper[4800]: I1125 15:33:49.997322 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-vhzmw" podStartSLOduration=2.084153412 podStartE2EDuration="2.997291543s" podCreationTimestamp="2025-11-25 15:33:47 +0000 UTC" firstStartedPulling="2025-11-25 15:33:48.306123399 +0000 UTC m=+989.360531881" lastFinishedPulling="2025-11-25 15:33:49.21926153 +0000 UTC m=+990.273670012" observedRunningTime="2025-11-25 15:33:49.997247082 +0000 UTC m=+991.051655564" watchObservedRunningTime="2025-11-25 15:33:49.997291543 +0000 UTC m=+991.051700025" Nov 25 15:33:50 crc kubenswrapper[4800]: I1125 15:33:50.901721 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vhzmw"] Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.511979 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6997m"] Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.513113 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.524704 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6997m"] Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.661711 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb4q5\" (UniqueName: \"kubernetes.io/projected/8eb527be-d4e7-4803-8ce9-88201bb4e17e-kube-api-access-jb4q5\") pod \"openstack-operator-index-6997m\" (UID: \"8eb527be-d4e7-4803-8ce9-88201bb4e17e\") " pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.764297 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb4q5\" (UniqueName: \"kubernetes.io/projected/8eb527be-d4e7-4803-8ce9-88201bb4e17e-kube-api-access-jb4q5\") pod \"openstack-operator-index-6997m\" (UID: \"8eb527be-d4e7-4803-8ce9-88201bb4e17e\") " pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.789456 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb4q5\" (UniqueName: \"kubernetes.io/projected/8eb527be-d4e7-4803-8ce9-88201bb4e17e-kube-api-access-jb4q5\") pod \"openstack-operator-index-6997m\" (UID: \"8eb527be-d4e7-4803-8ce9-88201bb4e17e\") " pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.830962 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:33:51 crc kubenswrapper[4800]: I1125 15:33:51.996858 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-vhzmw" podUID="d31faa16-596b-4977-a186-437bb94c7122" containerName="registry-server" containerID="cri-o://09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2" gracePeriod=2 Nov 25 15:33:52 crc kubenswrapper[4800]: I1125 15:33:52.067895 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6997m"] Nov 25 15:33:52 crc kubenswrapper[4800]: I1125 15:33:52.352388 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vhzmw" Nov 25 15:33:52 crc kubenswrapper[4800]: I1125 15:33:52.373757 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mdv4\" (UniqueName: \"kubernetes.io/projected/d31faa16-596b-4977-a186-437bb94c7122-kube-api-access-7mdv4\") pod \"d31faa16-596b-4977-a186-437bb94c7122\" (UID: \"d31faa16-596b-4977-a186-437bb94c7122\") " Nov 25 15:33:52 crc kubenswrapper[4800]: I1125 15:33:52.379811 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d31faa16-596b-4977-a186-437bb94c7122-kube-api-access-7mdv4" (OuterVolumeSpecName: "kube-api-access-7mdv4") pod "d31faa16-596b-4977-a186-437bb94c7122" (UID: "d31faa16-596b-4977-a186-437bb94c7122"). InnerVolumeSpecName "kube-api-access-7mdv4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:33:52 crc kubenswrapper[4800]: I1125 15:33:52.476384 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mdv4\" (UniqueName: \"kubernetes.io/projected/d31faa16-596b-4977-a186-437bb94c7122-kube-api-access-7mdv4\") on node \"crc\" DevicePath \"\"" Nov 25 15:33:52 crc kubenswrapper[4800]: I1125 15:33:52.770641 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-pjc8l" Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.008657 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6997m" event={"ID":"8eb527be-d4e7-4803-8ce9-88201bb4e17e","Type":"ContainerStarted","Data":"b13986c8d259c941170f99fe9d6844c223ec3af908401df0907bc20d885c2429"} Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.008978 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6997m" event={"ID":"8eb527be-d4e7-4803-8ce9-88201bb4e17e","Type":"ContainerStarted","Data":"7e6776f78189ed3a94d4000eb215030b49fa66a4335356cfa088aca0a58c7610"} Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.010857 4800 generic.go:334] "Generic (PLEG): container finished" podID="d31faa16-596b-4977-a186-437bb94c7122" containerID="09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2" exitCode=0 Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.010890 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vhzmw" event={"ID":"d31faa16-596b-4977-a186-437bb94c7122","Type":"ContainerDied","Data":"09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2"} Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.010910 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vhzmw" event={"ID":"d31faa16-596b-4977-a186-437bb94c7122","Type":"ContainerDied","Data":"458b03b69d978e0efc347f5bf3bf0f30aea10c6ce95e947e651f8319873d8e37"} Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.010930 4800 scope.go:117] "RemoveContainer" containerID="09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2" Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.011129 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vhzmw" Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.040182 4800 scope.go:117] "RemoveContainer" containerID="09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2" Nov 25 15:33:53 crc kubenswrapper[4800]: E1125 15:33:53.043529 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2\": container with ID starting with 09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2 not found: ID does not exist" containerID="09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2" Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.043605 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2"} err="failed to get container status \"09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2\": rpc error: code = NotFound desc = could not find container \"09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2\": container with ID starting with 09977bf17547ee2620eb748182552fd4260a68abcbcc4799f2ddbac4463314f2 not found: ID does not exist" Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.049293 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6997m" podStartSLOduration=1.5924702229999999 podStartE2EDuration="2.049265449s" podCreationTimestamp="2025-11-25 15:33:51 +0000 UTC" firstStartedPulling="2025-11-25 15:33:52.122596583 +0000 UTC m=+993.177005075" lastFinishedPulling="2025-11-25 15:33:52.579391779 +0000 UTC m=+993.633800301" observedRunningTime="2025-11-25 15:33:53.03435427 +0000 UTC m=+994.088762752" watchObservedRunningTime="2025-11-25 15:33:53.049265449 +0000 UTC m=+994.103673921" Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.055125 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vhzmw"] Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.058981 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-vhzmw"] Nov 25 15:33:53 crc kubenswrapper[4800]: I1125 15:33:53.795176 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d31faa16-596b-4977-a186-437bb94c7122" path="/var/lib/kubelet/pods/d31faa16-596b-4977-a186-437bb94c7122/volumes" Nov 25 15:34:01 crc kubenswrapper[4800]: I1125 15:34:01.832245 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:34:01 crc kubenswrapper[4800]: I1125 15:34:01.833409 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:34:01 crc kubenswrapper[4800]: I1125 15:34:01.875640 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:34:02 crc kubenswrapper[4800]: I1125 15:34:02.118032 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-6997m" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.211684 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j"] Nov 25 
15:34:03 crc kubenswrapper[4800]: E1125 15:34:03.211929 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d31faa16-596b-4977-a186-437bb94c7122" containerName="registry-server" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.211941 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="d31faa16-596b-4977-a186-437bb94c7122" containerName="registry-server" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.212062 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="d31faa16-596b-4977-a186-437bb94c7122" containerName="registry-server" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.212827 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.227031 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-l776f" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.250675 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j"] Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.348029 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-bundle\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.348130 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6bnk\" (UniqueName: \"kubernetes.io/projected/c29c04aa-31cb-498e-b976-ce6f9d381ba2-kube-api-access-s6bnk\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.348156 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-util\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.449120 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6bnk\" (UniqueName: \"kubernetes.io/projected/c29c04aa-31cb-498e-b976-ce6f9d381ba2-kube-api-access-s6bnk\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.449173 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-util\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " 
pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.449246 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-bundle\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.449902 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-bundle\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.450371 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-util\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.484054 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6bnk\" (UniqueName: \"kubernetes.io/projected/c29c04aa-31cb-498e-b976-ce6f9d381ba2-kube-api-access-s6bnk\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:03 crc kubenswrapper[4800]: I1125 15:34:03.530231 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:04 crc kubenswrapper[4800]: I1125 15:34:04.000129 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j"] Nov 25 15:34:04 crc kubenswrapper[4800]: I1125 15:34:04.095405 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" event={"ID":"c29c04aa-31cb-498e-b976-ce6f9d381ba2","Type":"ContainerStarted","Data":"0dddf208750493c9b1683ee26a8fa9ca1779a8065c41dc6d40c14a93c4c3830e"} Nov 25 15:34:05 crc kubenswrapper[4800]: I1125 15:34:05.108179 4800 generic.go:334] "Generic (PLEG): container finished" podID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerID="7d89f2a052970bf7cabfcedf473d728544bb37e1988e57669a50add3a5690b6f" exitCode=0 Nov 25 15:34:05 crc kubenswrapper[4800]: I1125 15:34:05.108223 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" event={"ID":"c29c04aa-31cb-498e-b976-ce6f9d381ba2","Type":"ContainerDied","Data":"7d89f2a052970bf7cabfcedf473d728544bb37e1988e57669a50add3a5690b6f"} Nov 25 15:34:06 crc kubenswrapper[4800]: I1125 15:34:06.127123 4800 generic.go:334] "Generic (PLEG): container finished" podID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerID="046eeaec69abab727921a9f887b4f290edf5eabbddd7758cc6b8d4a175d5bfb7" exitCode=0 Nov 25 15:34:06 crc kubenswrapper[4800]: I1125 15:34:06.127257 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" event={"ID":"c29c04aa-31cb-498e-b976-ce6f9d381ba2","Type":"ContainerDied","Data":"046eeaec69abab727921a9f887b4f290edf5eabbddd7758cc6b8d4a175d5bfb7"} Nov 25 15:34:07 crc kubenswrapper[4800]: I1125 15:34:07.136903 4800 generic.go:334] "Generic (PLEG): container finished" podID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerID="f850be9be1b0ac890146473331f8117f8442a8fc54cd6899075df4b921b925bb" exitCode=0 Nov 25 15:34:07 crc kubenswrapper[4800]: I1125 15:34:07.136961 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" event={"ID":"c29c04aa-31cb-498e-b976-ce6f9d381ba2","Type":"ContainerDied","Data":"f850be9be1b0ac890146473331f8117f8442a8fc54cd6899075df4b921b925bb"} Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.458881 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.624974 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-util\") pod \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.625114 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6bnk\" (UniqueName: \"kubernetes.io/projected/c29c04aa-31cb-498e-b976-ce6f9d381ba2-kube-api-access-s6bnk\") pod \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.625204 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-bundle\") pod \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\" (UID: \"c29c04aa-31cb-498e-b976-ce6f9d381ba2\") " Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.626316 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-bundle" (OuterVolumeSpecName: "bundle") pod "c29c04aa-31cb-498e-b976-ce6f9d381ba2" (UID: "c29c04aa-31cb-498e-b976-ce6f9d381ba2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.626524 4800 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.634332 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c29c04aa-31cb-498e-b976-ce6f9d381ba2-kube-api-access-s6bnk" (OuterVolumeSpecName: "kube-api-access-s6bnk") pod "c29c04aa-31cb-498e-b976-ce6f9d381ba2" (UID: "c29c04aa-31cb-498e-b976-ce6f9d381ba2"). InnerVolumeSpecName "kube-api-access-s6bnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.646312 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-util" (OuterVolumeSpecName: "util") pod "c29c04aa-31cb-498e-b976-ce6f9d381ba2" (UID: "c29c04aa-31cb-498e-b976-ce6f9d381ba2"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.728493 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6bnk\" (UniqueName: \"kubernetes.io/projected/c29c04aa-31cb-498e-b976-ce6f9d381ba2-kube-api-access-s6bnk\") on node \"crc\" DevicePath \"\"" Nov 25 15:34:08 crc kubenswrapper[4800]: I1125 15:34:08.728549 4800 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c29c04aa-31cb-498e-b976-ce6f9d381ba2-util\") on node \"crc\" DevicePath \"\"" Nov 25 15:34:09 crc kubenswrapper[4800]: I1125 15:34:09.156991 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" event={"ID":"c29c04aa-31cb-498e-b976-ce6f9d381ba2","Type":"ContainerDied","Data":"0dddf208750493c9b1683ee26a8fa9ca1779a8065c41dc6d40c14a93c4c3830e"} Nov 25 15:34:09 crc kubenswrapper[4800]: I1125 15:34:09.157064 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dddf208750493c9b1683ee26a8fa9ca1779a8065c41dc6d40c14a93c4c3830e" Nov 25 15:34:09 crc kubenswrapper[4800]: I1125 15:34:09.157186 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j" Nov 25 15:34:12 crc kubenswrapper[4800]: I1125 15:34:12.640213 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:34:12 crc kubenswrapper[4800]: I1125 15:34:12.640796 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.442530 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf"] Nov 25 15:34:16 crc kubenswrapper[4800]: E1125 15:34:16.443725 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerName="extract" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.443742 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerName="extract" Nov 25 15:34:16 crc kubenswrapper[4800]: E1125 15:34:16.443769 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerName="util" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.443776 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerName="util" Nov 25 15:34:16 crc kubenswrapper[4800]: E1125 15:34:16.443784 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerName="pull" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.443792 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerName="pull" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.443930 4800 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c29c04aa-31cb-498e-b976-ce6f9d381ba2" containerName="extract" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.444700 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.448086 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-57k69" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.482288 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf"] Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.552451 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bjjh\" (UniqueName: \"kubernetes.io/projected/a8c767e3-c848-47bd-b73c-a48f11f634db-kube-api-access-5bjjh\") pod \"openstack-operator-controller-operator-7b567956b5-2vtsf\" (UID: \"a8c767e3-c848-47bd-b73c-a48f11f634db\") " pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.654632 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bjjh\" (UniqueName: \"kubernetes.io/projected/a8c767e3-c848-47bd-b73c-a48f11f634db-kube-api-access-5bjjh\") pod \"openstack-operator-controller-operator-7b567956b5-2vtsf\" (UID: \"a8c767e3-c848-47bd-b73c-a48f11f634db\") " pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.677272 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bjjh\" (UniqueName: \"kubernetes.io/projected/a8c767e3-c848-47bd-b73c-a48f11f634db-kube-api-access-5bjjh\") pod \"openstack-operator-controller-operator-7b567956b5-2vtsf\" (UID: \"a8c767e3-c848-47bd-b73c-a48f11f634db\") " pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 15:34:16 crc kubenswrapper[4800]: I1125 15:34:16.763371 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 15:34:17 crc kubenswrapper[4800]: I1125 15:34:17.042483 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf"] Nov 25 15:34:17 crc kubenswrapper[4800]: I1125 15:34:17.222005 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" event={"ID":"a8c767e3-c848-47bd-b73c-a48f11f634db","Type":"ContainerStarted","Data":"57dc102c61a91d138789712942747cad402900f2b556bbb62e30c4670a4b8c82"} Nov 25 15:34:22 crc kubenswrapper[4800]: I1125 15:34:22.264972 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" event={"ID":"a8c767e3-c848-47bd-b73c-a48f11f634db","Type":"ContainerStarted","Data":"d5e6e9dcbde9b34b5c773a3d7dc0456d7889f3d6d50c3f3f2e835a716edfb1ab"} Nov 25 15:34:22 crc kubenswrapper[4800]: I1125 15:34:22.265540 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 15:34:22 crc kubenswrapper[4800]: I1125 15:34:22.302141 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" podStartSLOduration=1.483338692 podStartE2EDuration="6.302118906s" podCreationTimestamp="2025-11-25 15:34:16 +0000 UTC" firstStartedPulling="2025-11-25 15:34:17.061582415 +0000 UTC m=+1018.115990917" lastFinishedPulling="2025-11-25 15:34:21.880362649 +0000 UTC m=+1022.934771131" observedRunningTime="2025-11-25 15:34:22.298582929 +0000 UTC m=+1023.352991431" watchObservedRunningTime="2025-11-25 15:34:22.302118906 +0000 UTC m=+1023.356527378" Nov 25 15:34:36 crc kubenswrapper[4800]: I1125 15:34:36.767737 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 15:34:42 crc kubenswrapper[4800]: I1125 15:34:42.640414 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:34:42 crc kubenswrapper[4800]: I1125 15:34:42.641017 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:34:42 crc kubenswrapper[4800]: I1125 15:34:42.641098 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:34:42 crc kubenswrapper[4800]: I1125 15:34:42.641976 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1a9b7db7d78c7762803114dfba2c97d5027abe1ed7fd4f553dedba984708c24e"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:34:42 crc kubenswrapper[4800]: I1125 
15:34:42.642039 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://1a9b7db7d78c7762803114dfba2c97d5027abe1ed7fd4f553dedba984708c24e" gracePeriod=600 Nov 25 15:34:43 crc kubenswrapper[4800]: I1125 15:34:43.448524 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="1a9b7db7d78c7762803114dfba2c97d5027abe1ed7fd4f553dedba984708c24e" exitCode=0 Nov 25 15:34:43 crc kubenswrapper[4800]: I1125 15:34:43.449141 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"1a9b7db7d78c7762803114dfba2c97d5027abe1ed7fd4f553dedba984708c24e"} Nov 25 15:34:43 crc kubenswrapper[4800]: I1125 15:34:43.453016 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"d97d2c0b8a05e269074c76cf21138a3aeaeac0cd9bbe1be26dcd5369887e11f6"} Nov 25 15:34:43 crc kubenswrapper[4800]: I1125 15:34:43.453045 4800 scope.go:117] "RemoveContainer" containerID="4696a8bc43181471c595e8254afda59b2987f94a9cd2a837cdce4a6a707e3c00" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.123899 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.125506 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.128683 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-b4kcs" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.149915 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.155637 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.156801 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.160274 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-xldrf" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.165059 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.183909 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.185316 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.189160 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-q6r8x" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.201422 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.207944 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.222200 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.228746 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-m7zrc" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.248545 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.249653 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.255336 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-g4vfz" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.262361 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkb6j\" (UniqueName: \"kubernetes.io/projected/8976a97d-112c-4d56-b82f-74648f987a62-kube-api-access-zkb6j\") pod \"designate-operator-controller-manager-7d695c9b56-vdqnx\" (UID: \"8976a97d-112c-4d56-b82f-74648f987a62\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.262553 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wgfz\" (UniqueName: \"kubernetes.io/projected/05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec-kube-api-access-9wgfz\") pod \"cinder-operator-controller-manager-79856dc55c-gnfx4\" (UID: \"05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.262629 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2424t\" (UniqueName: \"kubernetes.io/projected/959b58dd-55f0-4f7a-aa2e-24a868241ebe-kube-api-access-2424t\") pod \"barbican-operator-controller-manager-86dc4d89c8-d4svd\" (UID: \"959b58dd-55f0-4f7a-aa2e-24a868241ebe\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.268419 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.273512 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.300919 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.302654 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.316333 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-n67fx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.322734 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.323900 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.340297 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-x6np2" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.341339 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.354283 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.355443 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.362348 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-8xcbf" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.363617 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkb6j\" (UniqueName: \"kubernetes.io/projected/8976a97d-112c-4d56-b82f-74648f987a62-kube-api-access-zkb6j\") pod \"designate-operator-controller-manager-7d695c9b56-vdqnx\" (UID: \"8976a97d-112c-4d56-b82f-74648f987a62\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.363689 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cmss\" (UniqueName: \"kubernetes.io/projected/b3ae53a0-88c1-4617-8052-f95d3b6d78d3-kube-api-access-6cmss\") pod \"glance-operator-controller-manager-68b95954c9-6bztx\" (UID: \"b3ae53a0-88c1-4617-8052-f95d3b6d78d3\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.363734 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wgfz\" (UniqueName: \"kubernetes.io/projected/05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec-kube-api-access-9wgfz\") pod \"cinder-operator-controller-manager-79856dc55c-gnfx4\" (UID: \"05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.363773 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtdnq\" (UniqueName: \"kubernetes.io/projected/cb7f9b0c-c801-4935-8d52-02179a0cfed0-kube-api-access-xtdnq\") pod \"heat-operator-controller-manager-774b86978c-qfdl8\" (UID: \"cb7f9b0c-c801-4935-8d52-02179a0cfed0\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.363799 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2424t\" (UniqueName: \"kubernetes.io/projected/959b58dd-55f0-4f7a-aa2e-24a868241ebe-kube-api-access-2424t\") pod \"barbican-operator-controller-manager-86dc4d89c8-d4svd\" (UID: \"959b58dd-55f0-4f7a-aa2e-24a868241ebe\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.366928 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.377143 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.388967 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.390313 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.436532 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2424t\" (UniqueName: \"kubernetes.io/projected/959b58dd-55f0-4f7a-aa2e-24a868241ebe-kube-api-access-2424t\") pod \"barbican-operator-controller-manager-86dc4d89c8-d4svd\" (UID: \"959b58dd-55f0-4f7a-aa2e-24a868241ebe\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.437767 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-5xh6h" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.459035 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.460062 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wgfz\" (UniqueName: \"kubernetes.io/projected/05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec-kube-api-access-9wgfz\") pod \"cinder-operator-controller-manager-79856dc55c-gnfx4\" (UID: \"05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.460224 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkb6j\" (UniqueName: \"kubernetes.io/projected/8976a97d-112c-4d56-b82f-74648f987a62-kube-api-access-zkb6j\") pod \"designate-operator-controller-manager-7d695c9b56-vdqnx\" (UID: \"8976a97d-112c-4d56-b82f-74648f987a62\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.467704 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9chfb\" (UniqueName: \"kubernetes.io/projected/c13855f7-d2e2-4a35-a7f0-2fe506ad36a5-kube-api-access-9chfb\") pod \"keystone-operator-controller-manager-748dc6576f-6clsb\" (UID: \"c13855f7-d2e2-4a35-a7f0-2fe506ad36a5\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.467818 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cmss\" (UniqueName: \"kubernetes.io/projected/b3ae53a0-88c1-4617-8052-f95d3b6d78d3-kube-api-access-6cmss\") pod \"glance-operator-controller-manager-68b95954c9-6bztx\" (UID: \"b3ae53a0-88c1-4617-8052-f95d3b6d78d3\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.467877 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z57wz\" (UniqueName: \"kubernetes.io/projected/e158909e-b254-40c0-95a8-9d5056889e6a-kube-api-access-z57wz\") pod \"horizon-operator-controller-manager-68c9694994-7kp74\" (UID: \"e158909e-b254-40c0-95a8-9d5056889e6a\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.467942 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/f2d7618e-4f44-4ad7-b381-26039921a683-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-ksrss\" (UID: \"f2d7618e-4f44-4ad7-b381-26039921a683\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.467994 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rswcf\" (UniqueName: \"kubernetes.io/projected/1d4a540a-f8e3-4566-9d9f-05b2b5e26399-kube-api-access-rswcf\") pod \"ironic-operator-controller-manager-5bfcdc958c-ng4ng\" (UID: \"1d4a540a-f8e3-4566-9d9f-05b2b5e26399\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.468034 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcnrm\" (UniqueName: \"kubernetes.io/projected/f2d7618e-4f44-4ad7-b381-26039921a683-kube-api-access-zcnrm\") pod \"infra-operator-controller-manager-d5cc86f4b-ksrss\" (UID: \"f2d7618e-4f44-4ad7-b381-26039921a683\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.468077 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtdnq\" (UniqueName: \"kubernetes.io/projected/cb7f9b0c-c801-4935-8d52-02179a0cfed0-kube-api-access-xtdnq\") pod \"heat-operator-controller-manager-774b86978c-qfdl8\" (UID: \"cb7f9b0c-c801-4935-8d52-02179a0cfed0\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.485202 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.506309 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.514630 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtdnq\" (UniqueName: \"kubernetes.io/projected/cb7f9b0c-c801-4935-8d52-02179a0cfed0-kube-api-access-xtdnq\") pod \"heat-operator-controller-manager-774b86978c-qfdl8\" (UID: \"cb7f9b0c-c801-4935-8d52-02179a0cfed0\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.525566 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cmss\" (UniqueName: \"kubernetes.io/projected/b3ae53a0-88c1-4617-8052-f95d3b6d78d3-kube-api-access-6cmss\") pod \"glance-operator-controller-manager-68b95954c9-6bztx\" (UID: \"b3ae53a0-88c1-4617-8052-f95d3b6d78d3\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.526186 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.551211 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.586278 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.587636 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.589422 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9chfb\" (UniqueName: \"kubernetes.io/projected/c13855f7-d2e2-4a35-a7f0-2fe506ad36a5-kube-api-access-9chfb\") pod \"keystone-operator-controller-manager-748dc6576f-6clsb\" (UID: \"c13855f7-d2e2-4a35-a7f0-2fe506ad36a5\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.589469 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z57wz\" (UniqueName: \"kubernetes.io/projected/e158909e-b254-40c0-95a8-9d5056889e6a-kube-api-access-z57wz\") pod \"horizon-operator-controller-manager-68c9694994-7kp74\" (UID: \"e158909e-b254-40c0-95a8-9d5056889e6a\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.589505 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f2d7618e-4f44-4ad7-b381-26039921a683-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-ksrss\" (UID: \"f2d7618e-4f44-4ad7-b381-26039921a683\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.589533 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rswcf\" (UniqueName: \"kubernetes.io/projected/1d4a540a-f8e3-4566-9d9f-05b2b5e26399-kube-api-access-rswcf\") pod \"ironic-operator-controller-manager-5bfcdc958c-ng4ng\" (UID: \"1d4a540a-f8e3-4566-9d9f-05b2b5e26399\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.589557 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcnrm\" (UniqueName: \"kubernetes.io/projected/f2d7618e-4f44-4ad7-b381-26039921a683-kube-api-access-zcnrm\") pod \"infra-operator-controller-manager-d5cc86f4b-ksrss\" (UID: \"f2d7618e-4f44-4ad7-b381-26039921a683\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:54 crc kubenswrapper[4800]: E1125 15:34:54.590158 4800 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 15:34:54 crc kubenswrapper[4800]: E1125 15:34:54.590203 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f2d7618e-4f44-4ad7-b381-26039921a683-cert podName:f2d7618e-4f44-4ad7-b381-26039921a683 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:55.090186942 +0000 UTC m=+1056.144595424 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f2d7618e-4f44-4ad7-b381-26039921a683-cert") pod "infra-operator-controller-manager-d5cc86f4b-ksrss" (UID: "f2d7618e-4f44-4ad7-b381-26039921a683") : secret "infra-operator-webhook-server-cert" not found Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.590538 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.612358 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.623336 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-zpxz8" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.628678 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.640946 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcnrm\" (UniqueName: \"kubernetes.io/projected/f2d7618e-4f44-4ad7-b381-26039921a683-kube-api-access-zcnrm\") pod \"infra-operator-controller-manager-d5cc86f4b-ksrss\" (UID: \"f2d7618e-4f44-4ad7-b381-26039921a683\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.663534 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rswcf\" (UniqueName: \"kubernetes.io/projected/1d4a540a-f8e3-4566-9d9f-05b2b5e26399-kube-api-access-rswcf\") pod \"ironic-operator-controller-manager-5bfcdc958c-ng4ng\" (UID: \"1d4a540a-f8e3-4566-9d9f-05b2b5e26399\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.681028 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.683259 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z57wz\" (UniqueName: \"kubernetes.io/projected/e158909e-b254-40c0-95a8-9d5056889e6a-kube-api-access-z57wz\") pod \"horizon-operator-controller-manager-68c9694994-7kp74\" (UID: \"e158909e-b254-40c0-95a8-9d5056889e6a\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.684900 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.686708 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.688891 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-jnqmj" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.690578 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l224z\" (UniqueName: \"kubernetes.io/projected/4682bc2d-38c7-4001-8dd8-095f444caa42-kube-api-access-l224z\") pod \"manila-operator-controller-manager-58bb8d67cc-r79jq\" (UID: \"4682bc2d-38c7-4001-8dd8-095f444caa42\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.694136 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9chfb\" (UniqueName: \"kubernetes.io/projected/c13855f7-d2e2-4a35-a7f0-2fe506ad36a5-kube-api-access-9chfb\") pod \"keystone-operator-controller-manager-748dc6576f-6clsb\" (UID: \"c13855f7-d2e2-4a35-a7f0-2fe506ad36a5\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.709622 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.712697 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.718331 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-tbn6v" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.739520 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.740682 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.744560 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-mjhf9" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.755933 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.757336 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.774940 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.785884 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-skkvx" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.786490 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.792933 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tcgl\" (UniqueName: \"kubernetes.io/projected/bf54f59d-2a26-4502-bb7d-b9aeabeb1645-kube-api-access-4tcgl\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-865xl\" (UID: \"bf54f59d-2a26-4502-bb7d-b9aeabeb1645\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.792987 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l224z\" (UniqueName: \"kubernetes.io/projected/4682bc2d-38c7-4001-8dd8-095f444caa42-kube-api-access-l224z\") pod \"manila-operator-controller-manager-58bb8d67cc-r79jq\" (UID: \"4682bc2d-38c7-4001-8dd8-095f444caa42\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.793039 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xnbp\" (UniqueName: \"kubernetes.io/projected/a206eabc-2689-4dc2-ac1a-066100be9382-kube-api-access-4xnbp\") pod \"neutron-operator-controller-manager-7c57c8bbc4-9mbm7\" (UID: \"a206eabc-2689-4dc2-ac1a-066100be9382\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.793070 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb5dq\" (UniqueName: \"kubernetes.io/projected/16bad9b7-305a-4081-a7f5-671fd1a51f31-kube-api-access-gb5dq\") pod \"octavia-operator-controller-manager-fd75fd47d-5xcgj\" (UID: \"16bad9b7-305a-4081-a7f5-671fd1a51f31\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.793176 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trngb\" (UniqueName: \"kubernetes.io/projected/671d7b5e-65d1-4a29-9ef6-fd0e770203c5-kube-api-access-trngb\") pod \"nova-operator-controller-manager-79556f57fc-brcmf\" (UID: \"671d7b5e-65d1-4a29-9ef6-fd0e770203c5\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.822858 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.864707 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l224z\" (UniqueName: \"kubernetes.io/projected/4682bc2d-38c7-4001-8dd8-095f444caa42-kube-api-access-l224z\") pod 
\"manila-operator-controller-manager-58bb8d67cc-r79jq\" (UID: \"4682bc2d-38c7-4001-8dd8-095f444caa42\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.869820 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.874292 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.890903 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-n7hcq" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.906574 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.915065 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xnbp\" (UniqueName: \"kubernetes.io/projected/a206eabc-2689-4dc2-ac1a-066100be9382-kube-api-access-4xnbp\") pod \"neutron-operator-controller-manager-7c57c8bbc4-9mbm7\" (UID: \"a206eabc-2689-4dc2-ac1a-066100be9382\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.915211 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb5dq\" (UniqueName: \"kubernetes.io/projected/16bad9b7-305a-4081-a7f5-671fd1a51f31-kube-api-access-gb5dq\") pod \"octavia-operator-controller-manager-fd75fd47d-5xcgj\" (UID: \"16bad9b7-305a-4081-a7f5-671fd1a51f31\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.915587 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trngb\" (UniqueName: \"kubernetes.io/projected/671d7b5e-65d1-4a29-9ef6-fd0e770203c5-kube-api-access-trngb\") pod \"nova-operator-controller-manager-79556f57fc-brcmf\" (UID: \"671d7b5e-65d1-4a29-9ef6-fd0e770203c5\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.915696 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tcgl\" (UniqueName: \"kubernetes.io/projected/bf54f59d-2a26-4502-bb7d-b9aeabeb1645-kube-api-access-4tcgl\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-865xl\" (UID: \"bf54f59d-2a26-4502-bb7d-b9aeabeb1645\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.920058 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.921409 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.927476 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-nw7q6" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.935573 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.952009 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.961393 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f"] Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.966235 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trngb\" (UniqueName: \"kubernetes.io/projected/671d7b5e-65d1-4a29-9ef6-fd0e770203c5-kube-api-access-trngb\") pod \"nova-operator-controller-manager-79556f57fc-brcmf\" (UID: \"671d7b5e-65d1-4a29-9ef6-fd0e770203c5\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.966269 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb5dq\" (UniqueName: \"kubernetes.io/projected/16bad9b7-305a-4081-a7f5-671fd1a51f31-kube-api-access-gb5dq\") pod \"octavia-operator-controller-manager-fd75fd47d-5xcgj\" (UID: \"16bad9b7-305a-4081-a7f5-671fd1a51f31\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" Nov 25 15:34:54 crc kubenswrapper[4800]: I1125 15:34:54.966659 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tcgl\" (UniqueName: \"kubernetes.io/projected/bf54f59d-2a26-4502-bb7d-b9aeabeb1645-kube-api-access-4tcgl\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-865xl\" (UID: \"bf54f59d-2a26-4502-bb7d-b9aeabeb1645\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.006422 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xnbp\" (UniqueName: \"kubernetes.io/projected/a206eabc-2689-4dc2-ac1a-066100be9382-kube-api-access-4xnbp\") pod \"neutron-operator-controller-manager-7c57c8bbc4-9mbm7\" (UID: \"a206eabc-2689-4dc2-ac1a-066100be9382\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.006644 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.006686 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.018605 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46w45\" (UniqueName: \"kubernetes.io/projected/b547724f-2a34-47b0-9125-668496d7dc6d-kube-api-access-46w45\") pod \"placement-operator-controller-manager-5db546f9d9-vhrgb\" (UID: \"b547724f-2a34-47b0-9125-668496d7dc6d\") " 
pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.018659 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p49hw\" (UniqueName: \"kubernetes.io/projected/19a270d9-7165-4dae-942a-5a6daa2cf905-kube-api-access-p49hw\") pod \"ovn-operator-controller-manager-66cf5c67ff-2fb8f\" (UID: \"19a270d9-7165-4dae-942a-5a6daa2cf905\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.053022 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.065297 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.073238 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5cdfw" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.073458 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.109909 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.123120 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.123341 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46w45\" (UniqueName: \"kubernetes.io/projected/b547724f-2a34-47b0-9125-668496d7dc6d-kube-api-access-46w45\") pod \"placement-operator-controller-manager-5db546f9d9-vhrgb\" (UID: \"b547724f-2a34-47b0-9125-668496d7dc6d\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.123388 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p49hw\" (UniqueName: \"kubernetes.io/projected/19a270d9-7165-4dae-942a-5a6daa2cf905-kube-api-access-p49hw\") pod \"ovn-operator-controller-manager-66cf5c67ff-2fb8f\" (UID: \"19a270d9-7165-4dae-942a-5a6daa2cf905\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.123428 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f2d7618e-4f44-4ad7-b381-26039921a683-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-ksrss\" (UID: \"f2d7618e-4f44-4ad7-b381-26039921a683\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.138196 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.140963 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/f2d7618e-4f44-4ad7-b381-26039921a683-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-ksrss\" (UID: \"f2d7618e-4f44-4ad7-b381-26039921a683\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.141311 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.146103 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p49hw\" (UniqueName: \"kubernetes.io/projected/19a270d9-7165-4dae-942a-5a6daa2cf905-kube-api-access-p49hw\") pod \"ovn-operator-controller-manager-66cf5c67ff-2fb8f\" (UID: \"19a270d9-7165-4dae-942a-5a6daa2cf905\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.147067 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-drw24" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.156125 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.158012 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.161722 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46w45\" (UniqueName: \"kubernetes.io/projected/b547724f-2a34-47b0-9125-668496d7dc6d-kube-api-access-46w45\") pod \"placement-operator-controller-manager-5db546f9d9-vhrgb\" (UID: \"b547724f-2a34-47b0-9125-668496d7dc6d\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.169352 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.180289 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-fhg5c" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.199826 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.201587 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.208600 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-9dz7m" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.208699 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.225097 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.225188 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lslb7\" (UniqueName: \"kubernetes.io/projected/fbc462d8-f085-4ffc-af8c-b91677ff3619-kube-api-access-lslb7\") pod \"swift-operator-controller-manager-6fdc4fcf86-gt68p\" (UID: \"fbc462d8-f085-4ffc-af8c-b91677ff3619\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.225251 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8662h\" (UniqueName: \"kubernetes.io/projected/e9539fdf-f01c-42c5-89a2-681d5c6142b4-kube-api-access-8662h\") pod \"telemetry-operator-controller-manager-567f98c9d-pbs6h\" (UID: \"e9539fdf-f01c-42c5-89a2-681d5c6142b4\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.225271 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bpcz\" (UniqueName: \"kubernetes.io/projected/e09fc035-4c04-486d-b4e7-6638d278c1d6-kube-api-access-8bpcz\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.229269 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.249613 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.259274 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.259766 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.286356 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.320388 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-95x9b"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.323999 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.326968 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-df749" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.327165 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8662h\" (UniqueName: \"kubernetes.io/projected/e9539fdf-f01c-42c5-89a2-681d5c6142b4-kube-api-access-8662h\") pod \"telemetry-operator-controller-manager-567f98c9d-pbs6h\" (UID: \"e9539fdf-f01c-42c5-89a2-681d5c6142b4\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.327212 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bpcz\" (UniqueName: \"kubernetes.io/projected/e09fc035-4c04-486d-b4e7-6638d278c1d6-kube-api-access-8bpcz\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.327254 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.327282 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9nn7\" (UniqueName: \"kubernetes.io/projected/51bc0d61-40c5-404a-978c-414717c8e3e9-kube-api-access-h9nn7\") pod \"test-operator-controller-manager-5cb74df96-kxm4v\" (UID: \"51bc0d61-40c5-404a-978c-414717c8e3e9\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.327328 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lslb7\" (UniqueName: \"kubernetes.io/projected/fbc462d8-f085-4ffc-af8c-b91677ff3619-kube-api-access-lslb7\") pod \"swift-operator-controller-manager-6fdc4fcf86-gt68p\" (UID: \"fbc462d8-f085-4ffc-af8c-b91677ff3619\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.328136 4800 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.328182 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert podName:e09fc035-4c04-486d-b4e7-6638d278c1d6 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:55.828166679 +0000 UTC m=+1056.882575161 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert") pod "openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" (UID: "e09fc035-4c04-486d-b4e7-6638d278c1d6") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.360420 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.365049 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8662h\" (UniqueName: \"kubernetes.io/projected/e9539fdf-f01c-42c5-89a2-681d5c6142b4-kube-api-access-8662h\") pod \"telemetry-operator-controller-manager-567f98c9d-pbs6h\" (UID: \"e9539fdf-f01c-42c5-89a2-681d5c6142b4\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.366617 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bpcz\" (UniqueName: \"kubernetes.io/projected/e09fc035-4c04-486d-b4e7-6638d278c1d6-kube-api-access-8bpcz\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.366637 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lslb7\" (UniqueName: \"kubernetes.io/projected/fbc462d8-f085-4ffc-af8c-b91677ff3619-kube-api-access-lslb7\") pod \"swift-operator-controller-manager-6fdc4fcf86-gt68p\" (UID: \"fbc462d8-f085-4ffc-af8c-b91677ff3619\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.375233 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.387275 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.389170 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-95x9b"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.399360 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.430715 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9nn7\" (UniqueName: \"kubernetes.io/projected/51bc0d61-40c5-404a-978c-414717c8e3e9-kube-api-access-h9nn7\") pod \"test-operator-controller-manager-5cb74df96-kxm4v\" (UID: \"51bc0d61-40c5-404a-978c-414717c8e3e9\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.430909 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4927\" (UniqueName: \"kubernetes.io/projected/71dd46d2-b3b0-4999-800c-03ac0a9758c6-kube-api-access-k4927\") pod \"watcher-operator-controller-manager-864885998-95x9b\" (UID: \"71dd46d2-b3b0-4999-800c-03ac0a9758c6\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.455410 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9nn7\" (UniqueName: \"kubernetes.io/projected/51bc0d61-40c5-404a-978c-414717c8e3e9-kube-api-access-h9nn7\") pod \"test-operator-controller-manager-5cb74df96-kxm4v\" (UID: \"51bc0d61-40c5-404a-978c-414717c8e3e9\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.455491 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.470480 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.478785 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-j5jpq" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.479052 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.479187 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.507792 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.523302 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.525561 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.532698 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4927\" (UniqueName: \"kubernetes.io/projected/71dd46d2-b3b0-4999-800c-03ac0a9758c6-kube-api-access-k4927\") pod \"watcher-operator-controller-manager-864885998-95x9b\" (UID: \"71dd46d2-b3b0-4999-800c-03ac0a9758c6\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.533012 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmwdf\" (UniqueName: \"kubernetes.io/projected/5a433244-abb3-4d43-a2b3-3266fd7234c0-kube-api-access-wmwdf\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.533187 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.533335 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.540367 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.542879 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-rf9gs" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.586718 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4927\" (UniqueName: \"kubernetes.io/projected/71dd46d2-b3b0-4999-800c-03ac0a9758c6-kube-api-access-k4927\") pod \"watcher-operator-controller-manager-864885998-95x9b\" (UID: \"71dd46d2-b3b0-4999-800c-03ac0a9758c6\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.634795 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.634857 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs\") pod 
\"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.634898 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh445\" (UniqueName: \"kubernetes.io/projected/762dc32c-7527-4ab4-a5fc-b7780e6da7d2-kube-api-access-dh445\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wbx2v\" (UID: \"762dc32c-7527-4ab4-a5fc-b7780e6da7d2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.634940 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmwdf\" (UniqueName: \"kubernetes.io/projected/5a433244-abb3-4d43-a2b3-3266fd7234c0-kube-api-access-wmwdf\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.635023 4800 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.635115 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs podName:5a433244-abb3-4d43-a2b3-3266fd7234c0 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:56.135091911 +0000 UTC m=+1057.189500393 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs") pod "openstack-operator-controller-manager-7cd5954d9-hh5m4" (UID: "5a433244-abb3-4d43-a2b3-3266fd7234c0") : secret "metrics-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.635387 4800 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.635468 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs podName:5a433244-abb3-4d43-a2b3-3266fd7234c0 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:56.135452021 +0000 UTC m=+1057.189860503 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs") pod "openstack-operator-controller-manager-7cd5954d9-hh5m4" (UID: "5a433244-abb3-4d43-a2b3-3266fd7234c0") : secret "webhook-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.663207 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmwdf\" (UniqueName: \"kubernetes.io/projected/5a433244-abb3-4d43-a2b3-3266fd7234c0-kube-api-access-wmwdf\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.710640 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.740010 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh445\" (UniqueName: \"kubernetes.io/projected/762dc32c-7527-4ab4-a5fc-b7780e6da7d2-kube-api-access-dh445\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wbx2v\" (UID: \"762dc32c-7527-4ab4-a5fc-b7780e6da7d2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.789694 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh445\" (UniqueName: \"kubernetes.io/projected/762dc32c-7527-4ab4-a5fc-b7780e6da7d2-kube-api-access-dh445\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wbx2v\" (UID: \"762dc32c-7527-4ab4-a5fc-b7780e6da7d2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.818312 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.848653 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.850613 4800 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: E1125 15:34:55.850664 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert podName:e09fc035-4c04-486d-b4e7-6638d278c1d6 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:56.850648876 +0000 UTC m=+1057.905057358 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert") pod "openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" (UID: "e09fc035-4c04-486d-b4e7-6638d278c1d6") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.854103 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.854157 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4"] Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.854170 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd"] Nov 25 15:34:55 crc kubenswrapper[4800]: W1125 15:34:55.904127 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05bb3a64_18f5_4b8d_bf4f_f46c5ba6c0ec.slice/crio-647ca5ce4eead9fe7de8c244155eee33833a01b59344398423169de04143e2d5 WatchSource:0}: Error finding container 647ca5ce4eead9fe7de8c244155eee33833a01b59344398423169de04143e2d5: Status 404 returned error can't find the container with id 647ca5ce4eead9fe7de8c244155eee33833a01b59344398423169de04143e2d5 Nov 25 15:34:55 crc kubenswrapper[4800]: I1125 15:34:55.929856 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.153780 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.153859 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.154003 4800 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.154052 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs podName:5a433244-abb3-4d43-a2b3-3266fd7234c0 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:57.154037563 +0000 UTC m=+1058.208446045 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs") pod "openstack-operator-controller-manager-7cd5954d9-hh5m4" (UID: "5a433244-abb3-4d43-a2b3-3266fd7234c0") : secret "webhook-server-cert" not found Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.154421 4800 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.154452 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs podName:5a433244-abb3-4d43-a2b3-3266fd7234c0 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:57.154441954 +0000 UTC m=+1058.208850436 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs") pod "openstack-operator-controller-manager-7cd5954d9-hh5m4" (UID: "5a433244-abb3-4d43-a2b3-3266fd7234c0") : secret "metrics-server-cert" not found Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.178406 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx"] Nov 25 15:34:56 crc kubenswrapper[4800]: W1125 15:34:56.185101 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3ae53a0_88c1_4617_8052_f95d3b6d78d3.slice/crio-8cfc0575edb9be1305ae248003d11f4f2efec8a4de8d1b3c19ef80065ecfc702 WatchSource:0}: Error finding container 8cfc0575edb9be1305ae248003d11f4f2efec8a4de8d1b3c19ef80065ecfc702: Status 404 returned error can't find the container with id 8cfc0575edb9be1305ae248003d11f4f2efec8a4de8d1b3c19ef80065ecfc702 Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.211793 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8"] Nov 25 15:34:56 crc kubenswrapper[4800]: W1125 15:34:56.214926 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb7f9b0c_c801_4935_8d52_02179a0cfed0.slice/crio-82a7a9f18dd5d219de46a419d917c301d01f075e96cb423311b7aebe552ab75b WatchSource:0}: Error finding container 82a7a9f18dd5d219de46a419d917c301d01f075e96cb423311b7aebe552ab75b: Status 404 returned error can't find the container with id 82a7a9f18dd5d219de46a419d917c301d01f075e96cb423311b7aebe552ab75b Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.481526 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.537770 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng"] Nov 25 15:34:56 crc kubenswrapper[4800]: W1125 15:34:56.553172 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d4a540a_f8e3_4566_9d9f_05b2b5e26399.slice/crio-e88d55b387471295ff5be21362fbc60567d591c1860e97229684c246a4f1a63a WatchSource:0}: Error finding container e88d55b387471295ff5be21362fbc60567d591c1860e97229684c246a4f1a63a: Status 404 returned error can't find the container with id e88d55b387471295ff5be21362fbc60567d591c1860e97229684c246a4f1a63a Nov 
25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.561176 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.580968 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.597953 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.610047 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.620933 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.628479 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" event={"ID":"05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec","Type":"ContainerStarted","Data":"647ca5ce4eead9fe7de8c244155eee33833a01b59344398423169de04143e2d5"} Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.639392 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" event={"ID":"c13855f7-d2e2-4a35-a7f0-2fe506ad36a5","Type":"ContainerStarted","Data":"d318a09254084db97c8593d6e5f936adcdca6aa9d376ff530a82659c74ab1795"} Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.671873 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zcnrm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-d5cc86f4b-ksrss_openstack-operators(f2d7618e-4f44-4ad7-b381-26039921a683): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.672149 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" event={"ID":"8976a97d-112c-4d56-b82f-74648f987a62","Type":"ContainerStarted","Data":"d73116cfbbb4b23988b348e456c9a08d7518b8155bdc74054aa474d94665fc8d"} Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.673041 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h9nn7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-kxm4v_openstack-operators(51bc0d61-40c5-404a-978c-414717c8e3e9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.673581 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p49hw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-2fb8f_openstack-operators(19a270d9-7165-4dae-942a-5a6daa2cf905): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: 
E1125 15:34:56.674396 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zcnrm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-d5cc86f4b-ksrss_openstack-operators(f2d7618e-4f44-4ad7-b381-26039921a683): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.675813 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p49hw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-2fb8f_openstack-operators(19a270d9-7165-4dae-942a-5a6daa2cf905): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.676393 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for 
\"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" podUID="f2d7618e-4f44-4ad7-b381-26039921a683" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.677528 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" podUID="19a270d9-7165-4dae-942a-5a6daa2cf905" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.678285 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" event={"ID":"bf54f59d-2a26-4502-bb7d-b9aeabeb1645","Type":"ContainerStarted","Data":"9678da0eb8ccd9002b44db10c077151017fb68e523e05e8c8f118d49bb9397a3"} Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.678674 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h9nn7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-kxm4v_openstack-operators(51bc0d61-40c5-404a-978c-414717c8e3e9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.680698 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.684432 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.685288 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" event={"ID":"cb7f9b0c-c801-4935-8d52-02179a0cfed0","Type":"ContainerStarted","Data":"82a7a9f18dd5d219de46a419d917c301d01f075e96cb423311b7aebe552ab75b"} Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.701420 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" event={"ID":"959b58dd-55f0-4f7a-aa2e-24a868241ebe","Type":"ContainerStarted","Data":"ae76f39c6248cf54caaa907dae35e8c7efd3496d992e450f41ad66a2944f0857"} Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.702125 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-46w45,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-vhrgb_openstack-operators(b547724f-2a34-47b0-9125-668496d7dc6d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.703995 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lslb7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-gt68p_openstack-operators(fbc462d8-f085-4ffc-af8c-b91677ff3619): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.704302 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v"] Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.707029 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lslb7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-gt68p_openstack-operators(fbc462d8-f085-4ffc-af8c-b91677ff3619): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.707722 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" event={"ID":"1d4a540a-f8e3-4566-9d9f-05b2b5e26399","Type":"ContainerStarted","Data":"e88d55b387471295ff5be21362fbc60567d591c1860e97229684c246a4f1a63a"} Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.706378 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-46w45,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-vhrgb_openstack-operators(b547724f-2a34-47b0-9125-668496d7dc6d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.708328 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" 
podUID="fbc462d8-f085-4ffc-af8c-b91677ff3619" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.709448 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" podUID="b547724f-2a34-47b0-9125-668496d7dc6d" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.710366 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.711759 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" event={"ID":"671d7b5e-65d1-4a29-9ef6-fd0e770203c5","Type":"ContainerStarted","Data":"13e05ea7d08f749cf820070254787c7bf83c24b16849322c74625e02313e6af5"} Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.714917 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" event={"ID":"a206eabc-2689-4dc2-ac1a-066100be9382","Type":"ContainerStarted","Data":"669b5568a5349d975e2314f946a6095192d4d40f743327240773aa410881790b"} Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.716674 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" event={"ID":"b3ae53a0-88c1-4617-8052-f95d3b6d78d3","Type":"ContainerStarted","Data":"8cfc0575edb9be1305ae248003d11f4f2efec8a4de8d1b3c19ef80065ecfc702"} Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.716879 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.721653 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.726164 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.730076 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h"] Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.802226 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-95x9b"] Nov 25 15:34:56 crc kubenswrapper[4800]: W1125 15:34:56.805880 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71dd46d2_b3b0_4999_800c_03ac0a9758c6.slice/crio-f22275c5ebaa000e1454d5184928bd72f2a5f6ff71f03327255fd1d738307f8f WatchSource:0}: Error finding container f22275c5ebaa000e1454d5184928bd72f2a5f6ff71f03327255fd1d738307f8f: Status 404 returned error can't find the container with id f22275c5ebaa000e1454d5184928bd72f2a5f6ff71f03327255fd1d738307f8f Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.826518 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k4927,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-95x9b_openstack-operators(71dd46d2-b3b0-4999-800c-03ac0a9758c6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.828986 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k4927,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-95x9b_openstack-operators(71dd46d2-b3b0-4999-800c-03ac0a9758c6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.831211 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" podUID="71dd46d2-b3b0-4999-800c-03ac0a9758c6" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.848148 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v"] Nov 25 15:34:56 crc kubenswrapper[4800]: W1125 15:34:56.854469 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod762dc32c_7527_4ab4_a5fc_b7780e6da7d2.slice/crio-b7a7e89c024a335bfe38ac87a74fde287abaee0a7b9c56b46bdfb14ef89d279b WatchSource:0}: Error finding container b7a7e89c024a335bfe38ac87a74fde287abaee0a7b9c56b46bdfb14ef89d279b: Status 404 returned error can't find the container with id b7a7e89c024a335bfe38ac87a74fde287abaee0a7b9c56b46bdfb14ef89d279b Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.858418 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dh445,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-wbx2v_openstack-operators(762dc32c-7527-4ab4-a5fc-b7780e6da7d2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 15:34:56 crc kubenswrapper[4800]: E1125 15:34:56.860091 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" podUID="762dc32c-7527-4ab4-a5fc-b7780e6da7d2" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.872075 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.884081 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e09fc035-4c04-486d-b4e7-6638d278c1d6-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-5w9fm\" (UID: \"e09fc035-4c04-486d-b4e7-6638d278c1d6\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:56 crc kubenswrapper[4800]: I1125 15:34:56.924858 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.181603 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.181954 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.182116 4800 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.182173 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs podName:5a433244-abb3-4d43-a2b3-3266fd7234c0 nodeName:}" failed. No retries permitted until 2025-11-25 15:34:59.182157288 +0000 UTC m=+1060.236565770 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs") pod "openstack-operator-controller-manager-7cd5954d9-hh5m4" (UID: "5a433244-abb3-4d43-a2b3-3266fd7234c0") : secret "webhook-server-cert" not found Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.201584 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.371080 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm"] Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.733651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" event={"ID":"e9539fdf-f01c-42c5-89a2-681d5c6142b4","Type":"ContainerStarted","Data":"e7e33ce59d2bc6c2b27758c26a3997dbf17b9c055ef6e2de46a42b6f9a1efaf3"} Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.741164 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" event={"ID":"b547724f-2a34-47b0-9125-668496d7dc6d","Type":"ContainerStarted","Data":"928f379eb0173b09995ccfc96b44db89df38b8fd6b3556791720ab2608f18d97"} Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.742988 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" 
event={"ID":"e09fc035-4c04-486d-b4e7-6638d278c1d6","Type":"ContainerStarted","Data":"726fc1001ab2e043f85c50659318b4fbc9ddabfe2b914aed9640ba1767f77dc0"} Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.748379 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" event={"ID":"f2d7618e-4f44-4ad7-b381-26039921a683","Type":"ContainerStarted","Data":"cf4ca66a0cba6dd4c224c787a42c3853f1ef7b410f20cf253c94f1aa60217282"} Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.753083 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" event={"ID":"762dc32c-7527-4ab4-a5fc-b7780e6da7d2","Type":"ContainerStarted","Data":"b7a7e89c024a335bfe38ac87a74fde287abaee0a7b9c56b46bdfb14ef89d279b"} Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.758241 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" podUID="b547724f-2a34-47b0-9125-668496d7dc6d" Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.758287 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" podUID="762dc32c-7527-4ab4-a5fc-b7780e6da7d2" Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.758826 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" event={"ID":"71dd46d2-b3b0-4999-800c-03ac0a9758c6","Type":"ContainerStarted","Data":"f22275c5ebaa000e1454d5184928bd72f2a5f6ff71f03327255fd1d738307f8f"} Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.772785 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" podUID="71dd46d2-b3b0-4999-800c-03ac0a9758c6" Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.773083 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" event={"ID":"fbc462d8-f085-4ffc-af8c-b91677ff3619","Type":"ContainerStarted","Data":"9304f18167a7f0d826141b709924f089a707a8dffc5d938cf3647c7d7ec27f4e"} Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.774069 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" podUID="f2d7618e-4f44-4ad7-b381-26039921a683" Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.779285 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" podUID="fbc462d8-f085-4ffc-af8c-b91677ff3619" Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.780272 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" event={"ID":"51bc0d61-40c5-404a-978c-414717c8e3e9","Type":"ContainerStarted","Data":"bed86b3816295893c4739fcfa2153269f661c1382e3b0690817f8a5cf0c33eba"} Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.801194 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" Nov 25 15:34:57 crc kubenswrapper[4800]: E1125 15:34:57.814013 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" podUID="19a270d9-7165-4dae-942a-5a6daa2cf905" Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.827952 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" event={"ID":"19a270d9-7165-4dae-942a-5a6daa2cf905","Type":"ContainerStarted","Data":"425a98d8ec7768d2dcd3da44577b50a0a36b5fd6ce8928dc5805dc59b77eddd4"} Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.828005 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" event={"ID":"4682bc2d-38c7-4001-8dd8-095f444caa42","Type":"ContainerStarted","Data":"acb6a7e0e8415f53aab3f20241b75bda6237f30575f238df8ee733fbbf5f4206"} Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.842496 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" event={"ID":"e158909e-b254-40c0-95a8-9d5056889e6a","Type":"ContainerStarted","Data":"a12e50be797929eb23e030b86b039924f260e623d1d0ab79081cb60766a2ff5c"} Nov 25 15:34:57 crc kubenswrapper[4800]: I1125 15:34:57.846639 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" event={"ID":"16bad9b7-305a-4081-a7f5-671fd1a51f31","Type":"ContainerStarted","Data":"c1b6a9118c3b28d559bf297ce72be1338b8fa4ac47d7250819274deb4189af89"} Nov 25 15:34:58 crc kubenswrapper[4800]: E1125 15:34:58.869977 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" podUID="762dc32c-7527-4ab4-a5fc-b7780e6da7d2" Nov 25 15:34:58 crc kubenswrapper[4800]: E1125 15:34:58.889202 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" podUID="19a270d9-7165-4dae-942a-5a6daa2cf905" Nov 25 15:34:58 crc kubenswrapper[4800]: E1125 15:34:58.889293 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" podUID="b547724f-2a34-47b0-9125-668496d7dc6d" Nov 25 15:34:58 crc kubenswrapper[4800]: E1125 15:34:58.889641 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" podUID="71dd46d2-b3b0-4999-800c-03ac0a9758c6" Nov 25 15:34:58 crc kubenswrapper[4800]: E1125 15:34:58.890135 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" 
pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" podUID="f2d7618e-4f44-4ad7-b381-26039921a683" Nov 25 15:34:58 crc kubenswrapper[4800]: E1125 15:34:58.890779 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" podUID="fbc462d8-f085-4ffc-af8c-b91677ff3619" Nov 25 15:34:58 crc kubenswrapper[4800]: E1125 15:34:58.913513 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" Nov 25 15:34:59 crc kubenswrapper[4800]: I1125 15:34:59.238279 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:59 crc kubenswrapper[4800]: I1125 15:34:59.271225 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5a433244-abb3-4d43-a2b3-3266fd7234c0-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-hh5m4\" (UID: \"5a433244-abb3-4d43-a2b3-3266fd7234c0\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:59 crc kubenswrapper[4800]: I1125 15:34:59.468091 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:34:59 crc kubenswrapper[4800]: E1125 15:34:59.945391 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" podUID="19a270d9-7165-4dae-942a-5a6daa2cf905" Nov 25 15:35:00 crc kubenswrapper[4800]: I1125 15:35:00.140503 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4"] Nov 25 15:35:00 crc kubenswrapper[4800]: W1125 15:35:00.154749 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a433244_abb3_4d43_a2b3_3266fd7234c0.slice/crio-27c52df93b930b47e5e1223a030df9708e90472d9bb383c142272c63a6f8f6a4 WatchSource:0}: Error finding container 27c52df93b930b47e5e1223a030df9708e90472d9bb383c142272c63a6f8f6a4: Status 404 returned error can't find the container with id 27c52df93b930b47e5e1223a030df9708e90472d9bb383c142272c63a6f8f6a4 Nov 25 15:35:00 crc kubenswrapper[4800]: I1125 15:35:00.942438 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" event={"ID":"5a433244-abb3-4d43-a2b3-3266fd7234c0","Type":"ContainerStarted","Data":"27c52df93b930b47e5e1223a030df9708e90472d9bb383c142272c63a6f8f6a4"} Nov 25 15:35:08 crc kubenswrapper[4800]: I1125 15:35:08.036942 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" event={"ID":"5a433244-abb3-4d43-a2b3-3266fd7234c0","Type":"ContainerStarted","Data":"fc2c403a95503cde1fbd92a79a23ff52363775fd2fa0460a280af9ca2bbf83bc"} Nov 25 15:35:08 crc kubenswrapper[4800]: I1125 15:35:08.037710 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:35:08 crc kubenswrapper[4800]: I1125 15:35:08.075736 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" podStartSLOduration=13.075708785 podStartE2EDuration="13.075708785s" podCreationTimestamp="2025-11-25 15:34:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:35:08.067123667 +0000 UTC m=+1069.121532149" watchObservedRunningTime="2025-11-25 15:35:08.075708785 +0000 UTC m=+1069.130117267" Nov 25 15:35:10 crc kubenswrapper[4800]: E1125 15:35:10.375384 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a" Nov 25 15:35:10 crc kubenswrapper[4800]: E1125 15:35:10.376100 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l224z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58bb8d67cc-r79jq_openstack-operators(4682bc2d-38c7-4001-8dd8-095f444caa42): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:35:11 crc kubenswrapper[4800]: E1125 15:35:11.397906 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:b582189b55fddc180a6d468c9dba7078009a693db37b4093d4ba0c99ec675377" Nov 25 15:35:11 crc kubenswrapper[4800]: E1125 15:35:11.398201 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:b582189b55fddc180a6d468c9dba7078009a693db37b4093d4ba0c99ec675377,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 
-3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rswcf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-5bfcdc958c-ng4ng_openstack-operators(1d4a540a-f8e3-4566-9d9f-05b2b5e26399): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:35:12 crc kubenswrapper[4800]: E1125 15:35:12.519743 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f" Nov 25 15:35:12 crc kubenswrapper[4800]: E1125 15:35:12.520424 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8662h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-567f98c9d-pbs6h_openstack-operators(e9539fdf-f01c-42c5-89a2-681d5c6142b4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:35:13 crc kubenswrapper[4800]: E1125 15:35:13.409202 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a" Nov 25 15:35:13 crc kubenswrapper[4800]: E1125 15:35:13.409526 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9chfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-748dc6576f-6clsb_openstack-operators(c13855f7-d2e2-4a35-a7f0-2fe506ad36a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:35:14 crc kubenswrapper[4800]: E1125 15:35:14.007866 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd" Nov 25 15:35:14 crc kubenswrapper[4800]: E1125 15:35:14.008624 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent@sha256:7dbadf7b98f2f305f9f1382f55a084c8ca404f4263f76b28e56bd0dc437e2192,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner@sha256:0473ff9eec0da231e2d0a10bf1abbe1dfa1a0f95b8f619e3a07605386951449a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api@sha256:c8101c77a82eae4407e41e1fd766dfc6e1b7f9ed1679e3efb6f91ff97a1557b2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator@sha256:eb9743b21bbadca6f7cb9ac4fc46b5d58c51c674073c7e1121f4474a71304071,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener@sha256:3d81f839b98c2e2a5bf0da79f2f9a92dff7d0a3c5a830b0e95c89dad8cf98a6a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier@sha256:d19ac99249b47dd8ea16cd6aaa5756346aa8a2f119ee50819c15c5366efb417d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24@sha256:8536169e5537fe6c330eba814248abdcf39cdd8f7e7336034d74e6fda9544050,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-
barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener@sha256:4f1fa337760e82bfd67cdd142a97c121146dd7e621daac161940dd5e4ddb80dc,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker@sha256:3613b345d5baed98effd906f8b0242d863e14c97078ea473ef01fe1b0afc46f3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute@sha256:9f9f367ed4c85efb16c3a74a4bb707ff0db271d7bc5abc70a71e984b55f43003,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi@sha256:b73ad22b4955b06d584bce81742556d8c0c7828c495494f8ea7c99391c61b70f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter@sha256:7211a617ec657701ca819aa0ba28e1d5750f5bf2c1391b755cc4a48cc360b0fa,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification@sha256:aa1d3aaf6b394621ed4089a98e0a82b763f467e8b5c5db772f9fdf99fc86e333,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core@sha256:09b5017c95d7697e66b9c64846bc48ef5826a009cba89b956ec54561e5f4a2d1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup@sha256:d6661053141b6df421288a7c9968a155ab82e478c1d75ab41f2cebe2f0ca02d2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler@sha256:ce2d63258cb4e7d0d1c07234de6889c5434464190906798019311a1c7cf6387f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume@sha256:0485ef9e5b4437f7cd2ba54034a87722ce4669ee86b3773c6b0c037ed8000e91,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api@sha256:43f8a00cd714c59f2c517fe6fabb63b16528191633eb39eef4002d49ace7ddb0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor@sha256:876a222b97b38b35012883c4146c8d102d019fcbe79f26d731d6f2e225e22ffc,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api@sha256:ff0c553ceeb2e0f44b010e37dc6d0db8a251797b88e56468b7cf7f05253e4232,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9@sha256:624f553f073af7493d34828b074adc9981cce403edd8e71482c7307008479fd9,ValueFro
m:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central@sha256:e3874936a518c8560339db8f840fc5461885819f6050b5de8d3ab9199bea5094,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns@sha256:1cea25f1d2a45affc80c46fb9d427749d3f06b61590ac6070a2910e3ec8a4e5d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer@sha256:e36d5b9a65194f12f7b01c6422ba3ed52a687fd1695fbb21f4986c67d9f9317f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound@sha256:8b21bec527d54cd766e277889df6bcccd2baeaa946274606b986c0c3b7ca689f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker@sha256:45aceca77f8fcf61127f0da650bdfdf11ede9b0944c78b63fab819d03283f96b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr@sha256:709ac58998927dd61786821ae1e63343fd97ccf5763aac5edb4583eea9401d22,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid@sha256:867d4ef7c21f75e6030a685b5762ab4d84b671316ed6b98d75200076e93342cd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler@sha256:581b65b646301e0fcb07582150ba63438f1353a85bf9acf1eb2acb4ce71c58bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron@sha256:2b90da93550b99d2fcfa95bd819f3363aa68346a416f8dc7baac3e9c5f487761,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd@sha256:6f86db36d668348be8c5b46dcda8b1fa23d34bfdc07164fbcbe7a6327fb4de24,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent@sha256:8cde52cef8795d1c91983b100d86541c7718160ec260fe0f97b96add4c2c8ee8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:a9583cb3baf440d2358ef041373833afbeae60da8159dd031502379901141620,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent@sha256:835ebed082fe1c45bd799d1d5357595ce63efeb05ca876f26b08443facb9c164,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent@sha256:011d682241db724bc40736c9b54d2ea450ea7e6be095b1ff5fa28c8007466775,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent@sha256:2025da90cff8f563deb08bee71efe16d4078edc2a767b2e225cca5c77f1aa2f9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b
22fd84dea865d88b9eb525e46247d6bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api@sha256:ff46cd5e0e13d105c4629e78c2734a50835f06b6a1e31da9e0462981d10c4be3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn@sha256:5b4fd0c2b76fa5539f74687b11c5882d77bd31352452322b37ff51fa18f12a61,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine@sha256:5e03376bd895346dc8f627ca15ded942526ed8b5e92872f453ce272e694d18d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon@sha256:65b94ff9fcd486845fb0544583bf2a973246a61a0ad32340fb92d632285f1057,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached@sha256:36a0fb31978aee0ded2483de311631e64a644d0b0685b5b055f65ede7eb8e8a2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis@sha256:5f6045841aff0fde6f684a34cdf49f8dc7b2c3bcbdeab201f1058971e0c5f79e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api@sha256:448f4e1b740c30936e340bd6e8534d78c83357bf373a4223950aa64d3484f007,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor@sha256:b68e3615af8a0eb0ef6bf9ceeef59540a6f4a9a85f6078a3620be115c73a7db8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector@sha256:7eae01cf60383e523c9cd94d158a9162120a7370829a1dad20fdea6b0fd660bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent@sha256:28cc10501788081eb61b5a1af35546191a92741f4f109df54c74e2b19439d0f9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe@sha256:9a616e37acfd120612f78043237a8541266ba34883833c9beb43f3da313661ad,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent@sha256:6b1be6cd94a0942259bca5d5d2c30cc7de4a33276b61f8ae3940226772106256,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone@sha256:02d2c22d15401574941fbe057095442dee0d6f7a0a9341de35d25e6a12a3fe4b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api@sha256:fc3b3a36b74fd653946723c54b208072d52200635850b531e9d595a7aaea5a01,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler@sha256:7850ccbff320bf9a1c9c769c1c70777eb97117dd8cd5ae4435be9b4622cf807a,Val
ueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share@sha256:397dac7e39cf40d14a986e6ec4a60fb698ca35c197d0db315b1318514cc6d1d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils@sha256:1c95142a36276686e720f86423ee171dc9adcc1e89879f627545b7c906ccd9bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api@sha256:e331a8fde6638e5ba154c4f0b38772a9a424f60656f2777245975fb1fa02f07d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute@sha256:b6e1e8a249d36ef36c6ac4170af1e043dda1ccc0f9672832d3ff151bf3533076,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor@sha256:cd3cf7a34053e850b4d4f9f4ea4c74953a54a42fd18e47d7c01d44a88923e925,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy@sha256:aee28476344fc0cc148fbe97daf9b1bfcedc22001550bba4bdc4e84be7b6989d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler@sha256:cfa0b92c976603ee2a937d34013a238fcd8aa75f998e50642e33489f14124633,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api@sha256:73c2f2d6eecf88acf4e45b133c8373d9bb006b530e0aff0b28f3b7420620a874,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager@sha256:927b405cc04abe5ff716186e8d35e2dc5fad1c8430194659ee6617d74e4e055d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping@sha256:6154d7cebd7c339afa5b86330262156171743aa5b79c2b78f9a2f378005ed8fb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog@sha256:e2db2f4af8d3d0be7868c6efef0189f3a2c74a8f96ae10e3f991cdf83feaef29,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker@sha256:c773629df257726a6d3cacc24a6e4df0babcd7d37df04e6d14676a8da028b9c9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient@sha256:776211111e2e6493706dbc49a3ba44f31d1b947919313ed3a0f35810e304ec52,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather@sha256:ae4a20d9aad04cfaeaa3105fa8e37db4216c3b17530bc98daf1204555bc23485,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:ecd56e6733c475f2d441344fd98f288c3eac0261ba113695
fec7520a954ccbc7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi@sha256:7cccf24ad0a152f90ca39893064f48a1656950ee8142685a5d482c71f0bdc9f5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:af46761060c7987e1dee5f14c06d85b46f12ad8e09c83d4246ab4e3a65dfda3e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base@sha256:05450b48f6b5352b2686a26e933e8727748edae2ae9652d9164b7d7a1817c55a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server@sha256:fc9c99eeef91523482bd8f92661b393287e1f2a24ad2ba9e33191f8de9af74cf,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:3e4ecc02b4b5e0860482a93599ba9ca598c5ce26c093c46e701f96fe51acb208,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server@sha256:2346037e064861c7892690d2e8b3e1eea1a26ce3c3a11fda0b41301965bc828c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account@sha256:c26c3ff9cabe3593ceb10006e782bf9391ac14785768ce9eec4f938c2d3cf228,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object@sha256:daa45220bb1c47922d0917aa8fe423bb82b03a01429f1c9e37635e701e352d71,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server@sha256:a80a074e227d3238bb6f285788a9e886ae7a5909ccbc5c19c93c369bdfe5b3b8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all@sha256:58ac66ca1be01fe0157977bd79a26cde4d0de153edfaf4162367c924826b2ef4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api@sha256:5e3f93f3085cfd94e599bbf771635477e5e015b7c22c624edca926459d369e69,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier@sha256:6edd7f91b0fc53dd91194f6e0c206a98e5667bb7a9c5f2a423349612d7300506,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine@sha256:2590b6c6197091ca423dfb93a609e0d843b270ad642f0c1920ac23f79aec8dca,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: 
{{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8bpcz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-b58f89467-5w9fm_openstack-operators(e09fc035-4c04-486d-b4e7-6638d278c1d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:35:19 crc kubenswrapper[4800]: I1125 15:35:19.487156 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-hh5m4" Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.131915 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" event={"ID":"bf54f59d-2a26-4502-bb7d-b9aeabeb1645","Type":"ContainerStarted","Data":"b38f1288237cf001b12119054abafbbc578f1b4bc06dfd19441d040786252ed9"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.138178 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" event={"ID":"a206eabc-2689-4dc2-ac1a-066100be9382","Type":"ContainerStarted","Data":"6a01c4bd54936121b083efb622021c7327e1a9c1676e069d0b6db356c002b876"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.146430 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" event={"ID":"959b58dd-55f0-4f7a-aa2e-24a868241ebe","Type":"ContainerStarted","Data":"9074720844b7cd8329789b4d5855a22b50776a35fa642db0e5b4a56bb342efb2"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.148424 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" 
event={"ID":"671d7b5e-65d1-4a29-9ef6-fd0e770203c5","Type":"ContainerStarted","Data":"ddcd2a46c0b2c3893b7561e5a3d901ab737dbe56e2b154a6b14a886ef7f7cbe4"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.150424 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" event={"ID":"e158909e-b254-40c0-95a8-9d5056889e6a","Type":"ContainerStarted","Data":"8adfb2ac7508d04450cdf46ad6d3d9b5116df55c8d350c26ba148cc74f71993a"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.169961 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" event={"ID":"16bad9b7-305a-4081-a7f5-671fd1a51f31","Type":"ContainerStarted","Data":"035f104186f4b3af7b689ce433f92801ce19a86ab5cc2e8e1f161edf1d0a0eef"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.183935 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" event={"ID":"b3ae53a0-88c1-4617-8052-f95d3b6d78d3","Type":"ContainerStarted","Data":"056813417d39b3324874ee1305962917de33806112391a8058b59ec38b6488bb"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.194242 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" event={"ID":"8976a97d-112c-4d56-b82f-74648f987a62","Type":"ContainerStarted","Data":"561e6096946a7ad7ed88a47202ba41035d814b9fe4757d8503a57a83c2afdd14"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.203883 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" event={"ID":"f2d7618e-4f44-4ad7-b381-26039921a683","Type":"ContainerStarted","Data":"519776e42214407cd48eaa28b9770b597119a733e37c347469190b8e74ae1d54"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.205594 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" event={"ID":"19a270d9-7165-4dae-942a-5a6daa2cf905","Type":"ContainerStarted","Data":"35b2afa88072b16c322e11830450d6152727be9f7fcf59acd4ce8830d43cdad9"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.207282 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" event={"ID":"05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec","Type":"ContainerStarted","Data":"8853033fe728c4fc3d81d57778a9e54db766ef6e6ef34b572b07c84a100ca8f1"} Nov 25 15:35:20 crc kubenswrapper[4800]: I1125 15:35:20.208518 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" event={"ID":"cb7f9b0c-c801-4935-8d52-02179a0cfed0","Type":"ContainerStarted","Data":"98c8b3f957c04fcaaa294fe5944af69871d766935bcb04bde88409af70228a24"} Nov 25 15:35:28 crc kubenswrapper[4800]: I1125 15:35:28.601455 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" event={"ID":"b547724f-2a34-47b0-9125-668496d7dc6d","Type":"ContainerStarted","Data":"cd04c25b5ecc77c69e3593413765b0d50a1a119ce1c99bda758536550a1cb0fb"} Nov 25 15:35:30 crc kubenswrapper[4800]: I1125 15:35:30.114235 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 15:35:32 crc kubenswrapper[4800]: E1125 15:35:32.058740 4800 log.go:32] "PullImage from image 
service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 25 15:35:32 crc kubenswrapper[4800]: E1125 15:35:32.059619 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dh445,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-wbx2v_openstack-operators(762dc32c-7527-4ab4-a5fc-b7780e6da7d2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:35:32 crc kubenswrapper[4800]: E1125 15:35:32.061152 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" podUID="762dc32c-7527-4ab4-a5fc-b7780e6da7d2" Nov 25 15:35:32 crc kubenswrapper[4800]: E1125 15:35:32.928271 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f" Nov 25 15:35:32 crc kubenswrapper[4800]: E1125 15:35:32.928504 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k4927,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-95x9b_openstack-operators(71dd46d2-b3b0-4999-800c-03ac0a9758c6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:35:33 crc kubenswrapper[4800]: E1125 15:35:33.182657 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 25 15:35:33 crc kubenswrapper[4800]: E1125 15:35:33.182866 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9chfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-748dc6576f-6clsb_openstack-operators(c13855f7-d2e2-4a35-a7f0-2fe506ad36a5): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 25 15:35:33 crc kubenswrapper[4800]: E1125 15:35:33.184127 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" podUID="c13855f7-d2e2-4a35-a7f0-2fe506ad36a5" Nov 25 15:35:33 crc kubenswrapper[4800]: E1125 15:35:33.374196 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 25 15:35:33 crc kubenswrapper[4800]: E1125 15:35:33.374482 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8bpcz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
openstack-baremetal-operator-controller-manager-b58f89467-5w9fm_openstack-operators(e09fc035-4c04-486d-b4e7-6638d278c1d6): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 25 15:35:33 crc kubenswrapper[4800]: E1125 15:35:33.375700 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" podUID="e09fc035-4c04-486d-b4e7-6638d278c1d6" Nov 25 15:35:35 crc kubenswrapper[4800]: I1125 15:35:35.673855 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" event={"ID":"fbc462d8-f085-4ffc-af8c-b91677ff3619","Type":"ContainerStarted","Data":"3a6de4dddaba817cfa990404808c89b09c9716b4bb3084dee0e99d9bb98d120a"} Nov 25 15:35:35 crc kubenswrapper[4800]: I1125 15:35:35.681488 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" event={"ID":"51bc0d61-40c5-404a-978c-414717c8e3e9","Type":"ContainerStarted","Data":"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb"} Nov 25 15:35:36 crc kubenswrapper[4800]: E1125 15:35:36.395396 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" podUID="e9539fdf-f01c-42c5-89a2-681d5c6142b4" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.693399 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" event={"ID":"a206eabc-2689-4dc2-ac1a-066100be9382","Type":"ContainerStarted","Data":"b388d2172da2e3f3376acbe792d9dc3f90e4e8251a37b2b1904a9cb43953e33b"} Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.693947 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.696156 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" event={"ID":"cb7f9b0c-c801-4935-8d52-02179a0cfed0","Type":"ContainerStarted","Data":"c71348238c80cdb8c82954591608b2959c4586fbd0af6aa280ac3d972e91fae6"} Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.696490 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.702162 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.702609 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" event={"ID":"959b58dd-55f0-4f7a-aa2e-24a868241ebe","Type":"ContainerStarted","Data":"652bc920f5282a65860c865247ab7ffb96cb05c7af6258d345ee36b42ee17fd4"} Nov 25 15:35:36 crc 
kubenswrapper[4800]: I1125 15:35:36.703440 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.704606 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.705296 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.705578 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" event={"ID":"16bad9b7-305a-4081-a7f5-671fd1a51f31","Type":"ContainerStarted","Data":"01ef8f7faa4b47f916e43635fb06b7c76fd6e4431645306a1f30710b3cc96755"} Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.706155 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.707894 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" event={"ID":"b3ae53a0-88c1-4617-8052-f95d3b6d78d3","Type":"ContainerStarted","Data":"711996bc868d88ad8d1b5bd412a361aae2d628346f47c670400379b8296f80e1"} Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.708533 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.708660 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.710129 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" event={"ID":"e9539fdf-f01c-42c5-89a2-681d5c6142b4","Type":"ContainerStarted","Data":"feb716db08d4e5ee464f41f3efb99327d31b839d4380eb29f1e1ae5eeb125a17"} Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.710938 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.713570 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" event={"ID":"fbc462d8-f085-4ffc-af8c-b91677ff3619","Type":"ContainerStarted","Data":"af5688615e2a814c41a1b563b54acd1c5e0d8cc8e85e714384f30701478a98b4"} Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.713737 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.725867 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9mbm7" podStartSLOduration=3.631910691 podStartE2EDuration="42.72582735s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.531194994 +0000 UTC m=+1057.585603476" lastFinishedPulling="2025-11-25 15:35:35.625111643 +0000 UTC 
m=+1096.679520135" observedRunningTime="2025-11-25 15:35:36.724443354 +0000 UTC m=+1097.778851836" watchObservedRunningTime="2025-11-25 15:35:36.72582735 +0000 UTC m=+1097.780235842" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.747579 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-d4svd" podStartSLOduration=3.028808842 podStartE2EDuration="42.746827376s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:55.884111021 +0000 UTC m=+1056.938519503" lastFinishedPulling="2025-11-25 15:35:35.602129545 +0000 UTC m=+1096.656538037" observedRunningTime="2025-11-25 15:35:36.746210849 +0000 UTC m=+1097.800619331" watchObservedRunningTime="2025-11-25 15:35:36.746827376 +0000 UTC m=+1097.801235858" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.779125 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-5xcgj" podStartSLOduration=3.7589446 podStartE2EDuration="42.779103769s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.601245677 +0000 UTC m=+1057.655654149" lastFinishedPulling="2025-11-25 15:35:35.621404836 +0000 UTC m=+1096.675813318" observedRunningTime="2025-11-25 15:35:36.777332322 +0000 UTC m=+1097.831740804" watchObservedRunningTime="2025-11-25 15:35:36.779103769 +0000 UTC m=+1097.833512251" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.802627 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-6bztx" podStartSLOduration=3.369883466 podStartE2EDuration="42.802604201s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.189475951 +0000 UTC m=+1057.243884443" lastFinishedPulling="2025-11-25 15:35:35.622196686 +0000 UTC m=+1096.676605178" observedRunningTime="2025-11-25 15:35:36.801138853 +0000 UTC m=+1097.855547335" watchObservedRunningTime="2025-11-25 15:35:36.802604201 +0000 UTC m=+1097.857012683" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.830596 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-qfdl8" podStartSLOduration=3.434692411 podStartE2EDuration="42.830569571s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.217350879 +0000 UTC m=+1057.271759361" lastFinishedPulling="2025-11-25 15:35:35.613228019 +0000 UTC m=+1096.667636521" observedRunningTime="2025-11-25 15:35:36.829064932 +0000 UTC m=+1097.883473434" watchObservedRunningTime="2025-11-25 15:35:36.830569571 +0000 UTC m=+1097.884978053" Nov 25 15:35:36 crc kubenswrapper[4800]: I1125 15:35:36.878479 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p" podStartSLOduration=6.705183001 podStartE2EDuration="42.878446898s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.702759883 +0000 UTC m=+1057.757168365" lastFinishedPulling="2025-11-25 15:35:32.87602375 +0000 UTC m=+1093.930432262" observedRunningTime="2025-11-25 15:35:36.86753649 +0000 UTC m=+1097.921944962" watchObservedRunningTime="2025-11-25 15:35:36.878446898 +0000 UTC m=+1097.932855380" Nov 25 15:35:37 crc kubenswrapper[4800]: E1125 15:35:37.084652 4800 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" podUID="1d4a540a-f8e3-4566-9d9f-05b2b5e26399" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.725271 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" event={"ID":"e158909e-b254-40c0-95a8-9d5056889e6a","Type":"ContainerStarted","Data":"0747481d8de4acc4a59305f25b7849d08bbac487629c4b04849891e06908b1ee"} Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.725394 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.729797 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" event={"ID":"05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec","Type":"ContainerStarted","Data":"f48f69fda68e4e6caed36b53fede466205048f09d09bd5b12f3a0defa2484df5"} Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.729927 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.731946 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.732392 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.732520 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" event={"ID":"1d4a540a-f8e3-4566-9d9f-05b2b5e26399","Type":"ContainerStarted","Data":"8ea9e7c2b1cef92a410fdce78610113787f21a6e17b7b4f3798614fe16e69de5"} Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.736391 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" event={"ID":"8976a97d-112c-4d56-b82f-74648f987a62","Type":"ContainerStarted","Data":"9dd04d3831d92f3aa4fe689625b1a78cb5814eaee68214193cd0ba78c179b811"} Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.737510 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.738463 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.740312 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" event={"ID":"51bc0d61-40c5-404a-978c-414717c8e3e9","Type":"ContainerStarted","Data":"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1"} Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.740453 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 
15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.744041 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" event={"ID":"e09fc035-4c04-486d-b4e7-6638d278c1d6","Type":"ContainerStarted","Data":"d8f355b16658333194606e8d91f6a07d2a34fd43dcbfc3f55411c06e6d3e1fcd"} Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.747071 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-7kp74" podStartSLOduration=4.775001964 podStartE2EDuration="43.747047959s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.630039359 +0000 UTC m=+1057.684447841" lastFinishedPulling="2025-11-25 15:35:35.602085344 +0000 UTC m=+1096.656493836" observedRunningTime="2025-11-25 15:35:37.743310968 +0000 UTC m=+1098.797719450" watchObservedRunningTime="2025-11-25 15:35:37.747047959 +0000 UTC m=+1098.801456442" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.760732 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" event={"ID":"19a270d9-7165-4dae-942a-5a6daa2cf905","Type":"ContainerStarted","Data":"6f65b5b3e47f445a8bc2e2802bd49925a9d15387d48dd864d9f856cb29bbac2c"} Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.776008 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" podStartSLOduration=7.394699511 podStartE2EDuration="43.775979282s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.672911103 +0000 UTC m=+1057.727319585" lastFinishedPulling="2025-11-25 15:35:33.054190874 +0000 UTC m=+1094.108599356" observedRunningTime="2025-11-25 15:35:37.770858358 +0000 UTC m=+1098.825266840" watchObservedRunningTime="2025-11-25 15:35:37.775979282 +0000 UTC m=+1098.830387764" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.793682 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-gnfx4" podStartSLOduration=4.082606231 podStartE2EDuration="43.79365933s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:55.911721442 +0000 UTC m=+1056.966129924" lastFinishedPulling="2025-11-25 15:35:35.622774531 +0000 UTC m=+1096.677183023" observedRunningTime="2025-11-25 15:35:37.789205901 +0000 UTC m=+1098.843614393" watchObservedRunningTime="2025-11-25 15:35:37.79365933 +0000 UTC m=+1098.848067812" Nov 25 15:35:37 crc kubenswrapper[4800]: I1125 15:35:37.831103 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-vdqnx" podStartSLOduration=4.099949743 podStartE2EDuration="43.831073353s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:55.8836746 +0000 UTC m=+1056.938083082" lastFinishedPulling="2025-11-25 15:35:35.6147982 +0000 UTC m=+1096.669206692" observedRunningTime="2025-11-25 15:35:37.822949918 +0000 UTC m=+1098.877358400" watchObservedRunningTime="2025-11-25 15:35:37.831073353 +0000 UTC m=+1098.885481835" Nov 25 15:35:38 crc kubenswrapper[4800]: E1125 15:35:38.367463 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc 
= copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" podUID="4682bc2d-38c7-4001-8dd8-095f444caa42" Nov 25 15:35:38 crc kubenswrapper[4800]: E1125 15:35:38.367628 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" podUID="71dd46d2-b3b0-4999-800c-03ac0a9758c6" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.772346 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" event={"ID":"671d7b5e-65d1-4a29-9ef6-fd0e770203c5","Type":"ContainerStarted","Data":"2c140dd49defc600185457d144af14fc30008cc6ada7d398fae40642139433ba"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.773962 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" event={"ID":"4682bc2d-38c7-4001-8dd8-095f444caa42","Type":"ContainerStarted","Data":"2c8f94157654c01cbc38d608284dd978b600a96dd3c3bac219f9a2e8766ade79"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.775354 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" event={"ID":"71dd46d2-b3b0-4999-800c-03ac0a9758c6","Type":"ContainerStarted","Data":"2fc337e958ef4dd4590106cc840fb67b12110381c4020079e78d54c1195a72ff"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.778147 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" event={"ID":"c13855f7-d2e2-4a35-a7f0-2fe506ad36a5","Type":"ContainerStarted","Data":"34c64420ba5fbac2a35957a8f50ec2474f40fa58b9392a31fc331caf685c5b42"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.778276 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" event={"ID":"c13855f7-d2e2-4a35-a7f0-2fe506ad36a5","Type":"ContainerStarted","Data":"eaa92471fae02967c134685972019eadaa90575537158c8377edcc07e84b3965"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.790755 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" event={"ID":"b547724f-2a34-47b0-9125-668496d7dc6d","Type":"ContainerStarted","Data":"63539cd4beae69705404e8c510b6ddfae1a107e849af377b168a67026ccd2356"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.792866 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.811425 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.813534 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" event={"ID":"bf54f59d-2a26-4502-bb7d-b9aeabeb1645","Type":"ContainerStarted","Data":"a1ae76b22826c548afe2be3fbf1f071ad97a143a600d69157f00152747a2fcd3"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.816201 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.825759 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.827412 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" event={"ID":"e09fc035-4c04-486d-b4e7-6638d278c1d6","Type":"ContainerStarted","Data":"1871b73547a139c09e48b410efa84a384d199a22950ebf4fbed436aa1fcc43ca"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.828140 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.831138 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" event={"ID":"f2d7618e-4f44-4ad7-b381-26039921a683","Type":"ContainerStarted","Data":"608fb9c7c02f28a79b36d20f3a90adc87f1a7f37f0660f3e4b4a5b3def47eacf"} Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.843988 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.844437 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.853517 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" podStartSLOduration=5.775755546 podStartE2EDuration="44.853494407s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.600725514 +0000 UTC m=+1057.655134006" lastFinishedPulling="2025-11-25 15:35:35.678464385 +0000 UTC m=+1096.732872867" observedRunningTime="2025-11-25 15:35:38.822970967 +0000 UTC m=+1099.877379469" watchObservedRunningTime="2025-11-25 15:35:38.853494407 +0000 UTC m=+1099.907902879" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.898782 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-865xl" podStartSLOduration=4.185234971 podStartE2EDuration="44.898762188s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.600261601 +0000 UTC m=+1057.654670083" lastFinishedPulling="2025-11-25 15:35:37.313788808 +0000 UTC m=+1098.368197300" observedRunningTime="2025-11-25 15:35:38.89343111 +0000 UTC m=+1099.947839592" watchObservedRunningTime="2025-11-25 15:35:38.898762188 +0000 UTC m=+1099.953170670" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.930745 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-vhrgb" podStartSLOduration=5.859110404 podStartE2EDuration="44.930724014s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.701928771 +0000 UTC m=+1057.756337253" lastFinishedPulling="2025-11-25 15:35:35.773542381 +0000 UTC m=+1096.827950863" observedRunningTime="2025-11-25 15:35:38.923203389 +0000 UTC m=+1099.977611901" 
watchObservedRunningTime="2025-11-25 15:35:38.930724014 +0000 UTC m=+1099.985132496" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.968400 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm" podStartSLOduration=6.7447551820000005 podStartE2EDuration="44.968373303s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:57.498255673 +0000 UTC m=+1058.552664155" lastFinishedPulling="2025-11-25 15:35:35.721873794 +0000 UTC m=+1096.776282276" observedRunningTime="2025-11-25 15:35:38.966055908 +0000 UTC m=+1100.020464400" watchObservedRunningTime="2025-11-25 15:35:38.968373303 +0000 UTC m=+1100.022781785" Nov 25 15:35:38 crc kubenswrapper[4800]: I1125 15:35:38.986996 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-ksrss" podStartSLOduration=4.427884987 podStartE2EDuration="44.986968042s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.671561978 +0000 UTC m=+1057.725970470" lastFinishedPulling="2025-11-25 15:35:37.230645043 +0000 UTC m=+1098.285053525" observedRunningTime="2025-11-25 15:35:38.985807204 +0000 UTC m=+1100.040215696" watchObservedRunningTime="2025-11-25 15:35:38.986968042 +0000 UTC m=+1100.041376524" Nov 25 15:35:39 crc kubenswrapper[4800]: I1125 15:35:39.012856 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f" podStartSLOduration=6.064889861 podStartE2EDuration="45.012809148s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.673493869 +0000 UTC m=+1057.727902351" lastFinishedPulling="2025-11-25 15:35:35.621413146 +0000 UTC m=+1096.675821638" observedRunningTime="2025-11-25 15:35:39.003081955 +0000 UTC m=+1100.057490457" watchObservedRunningTime="2025-11-25 15:35:39.012809148 +0000 UTC m=+1100.067217640" Nov 25 15:35:39 crc kubenswrapper[4800]: E1125 15:35:39.609449 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" podUID="71dd46d2-b3b0-4999-800c-03ac0a9758c6" Nov 25 15:35:39 crc kubenswrapper[4800]: I1125 15:35:39.840406 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" Nov 25 15:35:39 crc kubenswrapper[4800]: I1125 15:35:39.843266 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-brcmf" Nov 25 15:35:39 crc kubenswrapper[4800]: I1125 15:35:39.859237 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb" podStartSLOduration=5.13907891 podStartE2EDuration="45.859211479s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.549628671 +0000 UTC m=+1057.604037163" lastFinishedPulling="2025-11-25 15:35:37.26976125 +0000 UTC m=+1098.324169732" observedRunningTime="2025-11-25 15:35:39.856563292 +0000 UTC m=+1100.910971794" 
watchObservedRunningTime="2025-11-25 15:35:39.859211479 +0000 UTC m=+1100.913619961" Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.860602 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" event={"ID":"4682bc2d-38c7-4001-8dd8-095f444caa42","Type":"ContainerStarted","Data":"9bd4bcf8823a9781a4951e4af02713ace57f8faedd108feb10dcb9f4a89b4c72"} Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.861214 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.878477 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" event={"ID":"e9539fdf-f01c-42c5-89a2-681d5c6142b4","Type":"ContainerStarted","Data":"c3f3610985df16020db1cfadd59e35bf71124ab79b78f24ccbcd8811333df411"} Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.878757 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.885378 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" event={"ID":"1d4a540a-f8e3-4566-9d9f-05b2b5e26399","Type":"ContainerStarted","Data":"3f5c8f5b28b3c9ecbb771b348a9588bc8542657685e17f48dbafe292cbfe217c"} Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.898701 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq" podStartSLOduration=3.181514924 podStartE2EDuration="47.898677436s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.60022174 +0000 UTC m=+1057.654630222" lastFinishedPulling="2025-11-25 15:35:41.317384262 +0000 UTC m=+1102.371792734" observedRunningTime="2025-11-25 15:35:41.895143299 +0000 UTC m=+1102.949551781" watchObservedRunningTime="2025-11-25 15:35:41.898677436 +0000 UTC m=+1102.953085908" Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.915133 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng" podStartSLOduration=3.877530362 podStartE2EDuration="47.91511268s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.600746954 +0000 UTC m=+1057.655155436" lastFinishedPulling="2025-11-25 15:35:40.638329272 +0000 UTC m=+1101.692737754" observedRunningTime="2025-11-25 15:35:41.913710247 +0000 UTC m=+1102.968118729" watchObservedRunningTime="2025-11-25 15:35:41.91511268 +0000 UTC m=+1102.969521152" Nov 25 15:35:41 crc kubenswrapper[4800]: I1125 15:35:41.932111 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h" podStartSLOduration=3.996220044 podStartE2EDuration="47.932091461s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.701511771 +0000 UTC m=+1057.755920243" lastFinishedPulling="2025-11-25 15:35:40.637383178 +0000 UTC m=+1101.691791660" observedRunningTime="2025-11-25 15:35:41.929600364 +0000 UTC m=+1102.984008836" watchObservedRunningTime="2025-11-25 15:35:41.932091461 +0000 UTC m=+1102.986499943" Nov 25 15:35:42 crc 
Nov 25 15:35:42 crc kubenswrapper[4800]: I1125 15:35:42.894660 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng"
Nov 25 15:35:44 crc kubenswrapper[4800]: E1125 15:35:44.788142 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" podUID="762dc32c-7527-4ab4-a5fc-b7780e6da7d2"
Nov 25 15:35:44 crc kubenswrapper[4800]: I1125 15:35:44.907675 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb"
Nov 25 15:35:44 crc kubenswrapper[4800]: I1125 15:35:44.911759 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6clsb"
Nov 25 15:35:45 crc kubenswrapper[4800]: I1125 15:35:45.260437 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f"
Nov 25 15:35:45 crc kubenswrapper[4800]: I1125 15:35:45.263788 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-2fb8f"
Nov 25 15:35:45 crc kubenswrapper[4800]: I1125 15:35:45.403448 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-gt68p"
Nov 25 15:35:45 crc kubenswrapper[4800]: I1125 15:35:45.714421 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v"
Nov 25 15:35:46 crc kubenswrapper[4800]: I1125 15:35:46.934886 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-5w9fm"
Nov 25 15:35:54 crc kubenswrapper[4800]: I1125 15:35:54.693547 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-ng4ng"
Nov 25 15:35:54 crc kubenswrapper[4800]: I1125 15:35:54.956916 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-r79jq"
Nov 25 15:35:55 crc kubenswrapper[4800]: I1125 15:35:55.391475 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-pbs6h"
Nov 25 15:35:56 crc kubenswrapper[4800]: I1125 15:35:56.019990 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" event={"ID":"71dd46d2-b3b0-4999-800c-03ac0a9758c6","Type":"ContainerStarted","Data":"a4d25a2ff712cf65834eb2225a7265441581efd76b414ba4498b64776e40437d"}
Nov 25 15:35:56 crc kubenswrapper[4800]: I1125 15:35:56.021500 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b"
Nov 25 15:35:56 crc kubenswrapper[4800]: I1125 15:35:56.040989 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b" podStartSLOduration=3.620416281 podStartE2EDuration="1m2.040971179s" podCreationTimestamp="2025-11-25 15:34:54 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.826320503 +0000 UTC m=+1057.880728985" lastFinishedPulling="2025-11-25 15:35:55.246875401 +0000 UTC m=+1116.301283883" observedRunningTime="2025-11-25 15:35:56.038641147 +0000 UTC m=+1117.093049629" watchObservedRunningTime="2025-11-25 15:35:56.040971179 +0000 UTC m=+1117.095379661"
Nov 25 15:35:59 crc kubenswrapper[4800]: I1125 15:35:59.048099 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" event={"ID":"762dc32c-7527-4ab4-a5fc-b7780e6da7d2","Type":"ContainerStarted","Data":"7e82512dfaa16903321d29b785f724fef97c268382230fc2935fb3b671c17581"}
Nov 25 15:36:05 crc kubenswrapper[4800]: I1125 15:36:05.823284 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-95x9b"
Nov 25 15:36:05 crc kubenswrapper[4800]: I1125 15:36:05.863590 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wbx2v" podStartSLOduration=9.424558033 podStartE2EDuration="1m10.86356893s" podCreationTimestamp="2025-11-25 15:34:55 +0000 UTC" firstStartedPulling="2025-11-25 15:34:56.858248618 +0000 UTC m=+1057.912657100" lastFinishedPulling="2025-11-25 15:35:58.297259515 +0000 UTC m=+1119.351667997" observedRunningTime="2025-11-25 15:35:59.069475415 +0000 UTC m=+1120.123883897" watchObservedRunningTime="2025-11-25 15:36:05.86356893 +0000 UTC m=+1126.917977402"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.451432 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-86dkp"]
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.454619 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.461370 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.462675 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.462906 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-qz8nr"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.463035 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.474387 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-86dkp"]
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.520980 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6584b49599-r59k5"]
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.522443 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.525358 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.534803 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-r59k5"]
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.545441 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psxcq\" (UniqueName: \"kubernetes.io/projected/2b052479-1e8c-4559-93a8-8b542485b092-kube-api-access-psxcq\") pod \"dnsmasq-dns-7bdd77c89-86dkp\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.545508 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-dns-svc\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.545544 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xw5x\" (UniqueName: \"kubernetes.io/projected/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-kube-api-access-5xw5x\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.545597 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-config\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.545626 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b052479-1e8c-4559-93a8-8b542485b092-config\") pod \"dnsmasq-dns-7bdd77c89-86dkp\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.647673 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-config\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.647741 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b052479-1e8c-4559-93a8-8b542485b092-config\") pod \"dnsmasq-dns-7bdd77c89-86dkp\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.647784 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psxcq\" (UniqueName: \"kubernetes.io/projected/2b052479-1e8c-4559-93a8-8b542485b092-kube-api-access-psxcq\") pod \"dnsmasq-dns-7bdd77c89-86dkp\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.647815 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-dns-svc\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.647869 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xw5x\" (UniqueName: \"kubernetes.io/projected/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-kube-api-access-5xw5x\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.648984 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-dns-svc\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.649054 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b052479-1e8c-4559-93a8-8b542485b092-config\") pod \"dnsmasq-dns-7bdd77c89-86dkp\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.649083 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-config\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.673931 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xw5x\" (UniqueName: \"kubernetes.io/projected/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-kube-api-access-5xw5x\") pod \"dnsmasq-dns-6584b49599-r59k5\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.674681 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psxcq\" (UniqueName: \"kubernetes.io/projected/2b052479-1e8c-4559-93a8-8b542485b092-kube-api-access-psxcq\") pod \"dnsmasq-dns-7bdd77c89-86dkp\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.790742 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-86dkp"
Nov 25 15:36:22 crc kubenswrapper[4800]: I1125 15:36:22.842470 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-r59k5"
Nov 25 15:36:23 crc kubenswrapper[4800]: I1125 15:36:23.329970 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-86dkp"]
Nov 25 15:36:23 crc kubenswrapper[4800]: W1125 15:36:23.390786 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ba88f3b_e0dc_431a_adef_06d261c9f1b0.slice/crio-33233d57f31afa3bb9c74b6c62e06f38aaa9fd57e0f3ace08b5c6ad4cce43a2e WatchSource:0}: Error finding container 33233d57f31afa3bb9c74b6c62e06f38aaa9fd57e0f3ace08b5c6ad4cce43a2e: Status 404 returned error can't find the container with id 33233d57f31afa3bb9c74b6c62e06f38aaa9fd57e0f3ace08b5c6ad4cce43a2e
Nov 25 15:36:23 crc kubenswrapper[4800]: I1125 15:36:23.393777 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-r59k5"]
Nov 25 15:36:24 crc kubenswrapper[4800]: I1125 15:36:24.281323 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-86dkp" event={"ID":"2b052479-1e8c-4559-93a8-8b542485b092","Type":"ContainerStarted","Data":"42a0b5f6132580f06f46cd50e78a52d27b61a211c016c2796361d755f8412a86"}
Nov 25 15:36:24 crc kubenswrapper[4800]: I1125 15:36:24.283767 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-r59k5" event={"ID":"1ba88f3b-e0dc-431a-adef-06d261c9f1b0","Type":"ContainerStarted","Data":"33233d57f31afa3bb9c74b6c62e06f38aaa9fd57e0f3ace08b5c6ad4cce43a2e"}
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.584294 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-r59k5"]
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.610092 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-n8mdf"]
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.611439 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.632960 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-n8mdf"]
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.714279 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.714351 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-config\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.714430 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7xht\" (UniqueName: \"kubernetes.io/projected/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-kube-api-access-w7xht\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.815669 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.815732 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-config\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.815808 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7xht\" (UniqueName: \"kubernetes.io/projected/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-kube-api-access-w7xht\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.819933 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.821485 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-config\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.880322 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7xht\" (UniqueName: \"kubernetes.io/projected/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-kube-api-access-w7xht\") pod \"dnsmasq-dns-7c6d9948dc-n8mdf\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:25 crc kubenswrapper[4800]: I1125 15:36:25.936456 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.045942 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-86dkp"]
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.053142 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-q77d4"]
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.055443 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.121477 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-q77d4"]
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.124385 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-config\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.124436 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-dns-svc\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.124478 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l26kx\" (UniqueName: \"kubernetes.io/projected/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-kube-api-access-l26kx\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.225796 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-config\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.226302 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-dns-svc\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.226347 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l26kx\" (UniqueName: \"kubernetes.io/projected/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-kube-api-access-l26kx\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.228503 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-dns-svc\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.228500 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-config\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.287052 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l26kx\" (UniqueName: \"kubernetes.io/projected/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-kube-api-access-l26kx\") pod \"dnsmasq-dns-6486446b9f-q77d4\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.370315 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-n8mdf"]
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.428901 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-q77d4"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.779193 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.780776 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.783356 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9xqzd"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.783532 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.783613 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.783877 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.783967 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.784149 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.784567 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.799453 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.923002 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-q77d4"]
Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948555 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-config-data\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0"
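The reconciler lines above repeat the same three-step sequence for every volume of a new pod: VerifyControllerAttachedVolume (confirm the volume is attached, a no-op check for configmap/projected volumes), MountVolume started, and finally MountVolume.SetUp succeeded once the contents are materialized in the pod's volume directory. A compact Go sketch of that flow follows; the types and function names are illustrative, not kubelet's actual reconciler API.

    // reconcile_sketch.go - illustrative per-volume phases, mirroring the
    // log messages above; not kubelet's real types or call graph.
    package main

    import "fmt"

    type volume struct{ name, pod string }

    func reconcile(v volume) {
        // Phase 1: desired-state check against the attach/detach controller.
        fmt.Printf("VerifyControllerAttachedVolume started for %q pod %q\n", v.name, v.pod)
        // Phase 2: the mount operation is queued.
        fmt.Printf("MountVolume started for %q\n", v.name)
        // Phase 3: SetUp writes the volume contents into the pod's volumes dir.
        fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
    }

    func main() {
        for _, v := range []volume{
            {"config", "openstack/dnsmasq-dns-6486446b9f-q77d4"},
            {"dns-svc", "openstack/dnsmasq-dns-6486446b9f-q77d4"},
            {"kube-api-access-l26kx", "openstack/dnsmasq-dns-6486446b9f-q77d4"},
        } {
            reconcile(v)
        }
    }

The rabbitmq-server-0 records that follow show the same sequence, plus an extra MountDevice phase that only appears for persistent volumes.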
pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948618 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948651 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948683 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2jtc\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-kube-api-access-l2jtc\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948750 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948794 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948834 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72be41d8-6678-467c-a4d5-c4340e488c1b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948888 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948946 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.948962 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/72be41d8-6678-467c-a4d5-c4340e488c1b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:26 crc kubenswrapper[4800]: I1125 15:36:26.949010 
4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053012 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053079 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-config-data\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053111 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053138 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053163 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2jtc\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-kube-api-access-l2jtc\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053191 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053234 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72be41d8-6678-467c-a4d5-c4340e488c1b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053251 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053277 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-confd\") 
pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053301 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.053317 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/72be41d8-6678-467c-a4d5-c4340e488c1b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.054220 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.054829 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-config-data\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.055771 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.056194 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.062775 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.067957 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.083570 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.083607 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/72be41d8-6678-467c-a4d5-c4340e488c1b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.084452 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.087856 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72be41d8-6678-467c-a4d5-c4340e488c1b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.089922 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2jtc\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-kube-api-access-l2jtc\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.125035 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " pod="openstack/rabbitmq-server-0" Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.179239 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.181200 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.188201 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.188635 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.188859 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.188980 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.189110 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.194309 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-bprtr"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.194606 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.203367 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.323344 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-q77d4" event={"ID":"cf387b8c-24e2-43e5-a1c7-65b876b98b8d","Type":"ContainerStarted","Data":"2fedeeabf5c32cc27179885e25e6a82a871748660b946d765ec79fb5e4b63d6d"}
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.327479 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf" event={"ID":"d7d75985-cb77-46fc-ab4c-59a81c36cd4c","Type":"ContainerStarted","Data":"02ff8db2c1c500c1b999fd015a6ada3fd2c0bac029a0696ed7b9ba8f793312e9"}
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.358518 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46b2c800-efef-4668-9a57-c66ff504e0db-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.358585 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.358619 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.358648 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.358679 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.358704 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.359003 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.359125 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.359273 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk8kh\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-kube-api-access-zk8kh\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.359365 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.359600 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/46b2c800-efef-4668-9a57-c66ff504e0db-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.415569 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461716 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk8kh\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-kube-api-access-zk8kh\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461784 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461821 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/46b2c800-efef-4668-9a57-c66ff504e0db-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461873 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46b2c800-efef-4668-9a57-c66ff504e0db-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461903 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461930 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461959 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.461986 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.462012 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.462034 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.462052 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.462925 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.463569 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.464664 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.465258 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.466466 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.467161 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.467326 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.471347 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/46b2c800-efef-4668-9a57-c66ff504e0db-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.475326 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46b2c800-efef-4668-9a57-c66ff504e0db-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.483291 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.495385 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk8kh\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-kube-api-access-zk8kh\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.518855 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.536489 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 15:36:27 crc kubenswrapper[4800]: I1125 15:36:27.982572 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 15:36:27 crc kubenswrapper[4800]: W1125 15:36:27.996422 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72be41d8_6678_467c_a4d5_c4340e488c1b.slice/crio-97b208d93931b80cc98ffaeece031a65c5ba3fa51eb0e8cce26411f4931bc9b8 WatchSource:0}: Error finding container 97b208d93931b80cc98ffaeece031a65c5ba3fa51eb0e8cce26411f4931bc9b8: Status 404 returned error can't find the container with id 97b208d93931b80cc98ffaeece031a65c5ba3fa51eb0e8cce26411f4931bc9b8
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.104155 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.352732 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"72be41d8-6678-467c-a4d5-c4340e488c1b","Type":"ContainerStarted","Data":"97b208d93931b80cc98ffaeece031a65c5ba3fa51eb0e8cce26411f4931bc9b8"}
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.360267 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"46b2c800-efef-4668-9a57-c66ff504e0db","Type":"ContainerStarted","Data":"04857e909d03657d3e3ea0d5c80afd1577dea50e82af70029ce71e48112c853a"}
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.400538 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.402825 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.406519 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.407275 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.409934 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.410467 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-45tsd"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.424211 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.440838 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.482858 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.490342 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.490485 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.490592 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.490669 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.490778 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-config-data-default\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.490876 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbf6q\" (UniqueName: \"kubernetes.io/projected/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-kube-api-access-sbf6q\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.491450 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-kolla-config\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593071 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593135 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593162 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593197 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593224 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593271 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-config-data-default\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593288 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbf6q\" (UniqueName: \"kubernetes.io/projected/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-kube-api-access-sbf6q\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.593309 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-kolla-config\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.594209 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-kolla-config\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.595631 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-config-data-default\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.595752 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.596013 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.596745 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.607966 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.626542 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbf6q\" (UniqueName: \"kubernetes.io/projected/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-kube-api-access-sbf6q\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.628458 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b4060af-fd4c-49d5-980e-a496a2fcfbd5-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:28 crc kubenswrapper[4800]: I1125 15:36:28.818009 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"6b4060af-fd4c-49d5-980e-a496a2fcfbd5\") " pod="openstack/openstack-galera-0"
Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.068392 4800 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.691862 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 15:36:29 crc kubenswrapper[4800]: W1125 15:36:29.707973 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b4060af_fd4c_49d5_980e_a496a2fcfbd5.slice/crio-fef20f17b84a3193694633cbcd8813cc078f2575beee8b32fe546896ca054d7d WatchSource:0}: Error finding container fef20f17b84a3193694633cbcd8813cc078f2575beee8b32fe546896ca054d7d: Status 404 returned error can't find the container with id fef20f17b84a3193694633cbcd8813cc078f2575beee8b32fe546896ca054d7d Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.830621 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.835313 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.838122 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.838441 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.839100 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-kr9bq" Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.841292 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 15:36:29 crc kubenswrapper[4800]: I1125 15:36:29.852125 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.017366 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.018710 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.026346 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.026446 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.026504 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.026764 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-4s7ff" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050214 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6fe0af4-a236-4346-a806-8601ecaa33b6-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050322 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f6fe0af4-a236-4346-a806-8601ecaa33b6-config-data\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050361 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050396 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050475 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050517 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6fe0af4-a236-4346-a806-8601ecaa33b6-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050536 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29fj9\" (UniqueName: \"kubernetes.io/projected/0028cc49-034e-4ff3-99c1-7c13bb298646-kube-api-access-29fj9\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050571 4800 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0028cc49-034e-4ff3-99c1-7c13bb298646-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050595 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb4dr\" (UniqueName: \"kubernetes.io/projected/f6fe0af4-a236-4346-a806-8601ecaa33b6-kube-api-access-pb4dr\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050646 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050677 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0028cc49-034e-4ff3-99c1-7c13bb298646-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050701 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0028cc49-034e-4ff3-99c1-7c13bb298646-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.050724 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6fe0af4-a236-4346-a806-8601ecaa33b6-kolla-config\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152528 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0028cc49-034e-4ff3-99c1-7c13bb298646-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152601 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0028cc49-034e-4ff3-99c1-7c13bb298646-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152625 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6fe0af4-a236-4346-a806-8601ecaa33b6-kolla-config\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152669 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6fe0af4-a236-4346-a806-8601ecaa33b6-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152711 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f6fe0af4-a236-4346-a806-8601ecaa33b6-config-data\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152745 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152777 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152804 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152828 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29fj9\" (UniqueName: \"kubernetes.io/projected/0028cc49-034e-4ff3-99c1-7c13bb298646-kube-api-access-29fj9\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152961 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6fe0af4-a236-4346-a806-8601ecaa33b6-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.152994 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0028cc49-034e-4ff3-99c1-7c13bb298646-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.153025 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb4dr\" (UniqueName: \"kubernetes.io/projected/f6fe0af4-a236-4346-a806-8601ecaa33b6-kube-api-access-pb4dr\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.153075 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " 
pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.153638 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.153689 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0028cc49-034e-4ff3-99c1-7c13bb298646-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.155056 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.156615 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f6fe0af4-a236-4346-a806-8601ecaa33b6-config-data\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.156641 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.158611 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0028cc49-034e-4ff3-99c1-7c13bb298646-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.160767 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6fe0af4-a236-4346-a806-8601ecaa33b6-kolla-config\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.180757 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0028cc49-034e-4ff3-99c1-7c13bb298646-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.182487 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6fe0af4-a236-4346-a806-8601ecaa33b6-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.183944 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0028cc49-034e-4ff3-99c1-7c13bb298646-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.188772 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6fe0af4-a236-4346-a806-8601ecaa33b6-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.195672 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb4dr\" (UniqueName: \"kubernetes.io/projected/f6fe0af4-a236-4346-a806-8601ecaa33b6-kube-api-access-pb4dr\") pod \"memcached-0\" (UID: \"f6fe0af4-a236-4346-a806-8601ecaa33b6\") " pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.198810 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29fj9\" (UniqueName: \"kubernetes.io/projected/0028cc49-034e-4ff3-99c1-7c13bb298646-kube-api-access-29fj9\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.241319 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0028cc49-034e-4ff3-99c1-7c13bb298646\") " pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.341071 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.424482 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6b4060af-fd4c-49d5-980e-a496a2fcfbd5","Type":"ContainerStarted","Data":"fef20f17b84a3193694633cbcd8813cc078f2575beee8b32fe546896ca054d7d"} Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.507619 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.869562 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 15:36:30 crc kubenswrapper[4800]: I1125 15:36:30.991794 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 15:36:31 crc kubenswrapper[4800]: I1125 15:36:31.912512 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:36:31 crc kubenswrapper[4800]: I1125 15:36:31.915540 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 15:36:31 crc kubenswrapper[4800]: I1125 15:36:31.918791 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-5q2px" Nov 25 15:36:31 crc kubenswrapper[4800]: I1125 15:36:31.936375 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:36:31 crc kubenswrapper[4800]: I1125 15:36:31.998730 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br7bm\" (UniqueName: \"kubernetes.io/projected/65654af1-0a54-4d42-b45b-bae47243b055-kube-api-access-br7bm\") pod \"kube-state-metrics-0\" (UID: \"65654af1-0a54-4d42-b45b-bae47243b055\") " pod="openstack/kube-state-metrics-0" Nov 25 15:36:32 crc kubenswrapper[4800]: I1125 15:36:32.101915 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br7bm\" (UniqueName: \"kubernetes.io/projected/65654af1-0a54-4d42-b45b-bae47243b055-kube-api-access-br7bm\") pod \"kube-state-metrics-0\" (UID: \"65654af1-0a54-4d42-b45b-bae47243b055\") " pod="openstack/kube-state-metrics-0" Nov 25 15:36:32 crc kubenswrapper[4800]: I1125 15:36:32.139325 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br7bm\" (UniqueName: \"kubernetes.io/projected/65654af1-0a54-4d42-b45b-bae47243b055-kube-api-access-br7bm\") pod \"kube-state-metrics-0\" (UID: \"65654af1-0a54-4d42-b45b-bae47243b055\") " pod="openstack/kube-state-metrics-0" Nov 25 15:36:32 crc kubenswrapper[4800]: I1125 15:36:32.256617 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.262476 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jmbtv"] Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.264604 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.269405 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.269748 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.272569 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-f6czt" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.293531 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-tklmv"] Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.296613 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.315058 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jmbtv"] Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.324384 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-tklmv"] Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.371948 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-run\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.371997 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-lib\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372017 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-combined-ca-bundle\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372037 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-log-ovn\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372066 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-ovn-controller-tls-certs\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372093 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-scripts\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372155 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-run-ovn\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372319 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-log\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 
15:36:35.372356 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ead9ef3-a389-45a2-a1be-0b1d07116fde-scripts\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372429 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-etc-ovs\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372473 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-run\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372527 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zd2s\" (UniqueName: \"kubernetes.io/projected/1ead9ef3-a389-45a2-a1be-0b1d07116fde-kube-api-access-6zd2s\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.372553 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-849x6\" (UniqueName: \"kubernetes.io/projected/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-kube-api-access-849x6\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.473986 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-etc-ovs\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474073 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-run\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474126 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zd2s\" (UniqueName: \"kubernetes.io/projected/1ead9ef3-a389-45a2-a1be-0b1d07116fde-kube-api-access-6zd2s\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474156 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-849x6\" (UniqueName: \"kubernetes.io/projected/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-kube-api-access-849x6\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474200 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-run\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474221 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-lib\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474241 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-combined-ca-bundle\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474259 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-log-ovn\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474283 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-ovn-controller-tls-certs\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474305 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-run-ovn\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474324 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-scripts\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474361 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-log\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474382 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ead9ef3-a389-45a2-a1be-0b1d07116fde-scripts\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474548 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-etc-ovs\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " 
pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474699 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-run\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474753 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-run\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474772 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-log-ovn\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474797 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-var-run-ovn\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.474930 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-log\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.475138 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1ead9ef3-a389-45a2-a1be-0b1d07116fde-var-lib\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.477065 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-scripts\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.478408 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ead9ef3-a389-45a2-a1be-0b1d07116fde-scripts\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.488653 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-ovn-controller-tls-certs\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.488893 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-combined-ca-bundle\") pod 
\"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.492348 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zd2s\" (UniqueName: \"kubernetes.io/projected/1ead9ef3-a389-45a2-a1be-0b1d07116fde-kube-api-access-6zd2s\") pod \"ovn-controller-ovs-tklmv\" (UID: \"1ead9ef3-a389-45a2-a1be-0b1d07116fde\") " pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.492566 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-849x6\" (UniqueName: \"kubernetes.io/projected/f0140b9d-bed7-44ae-a1d5-8e0acdb70742-kube-api-access-849x6\") pod \"ovn-controller-jmbtv\" (UID: \"f0140b9d-bed7-44ae-a1d5-8e0acdb70742\") " pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.588039 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jmbtv" Nov 25 15:36:35 crc kubenswrapper[4800]: I1125 15:36:35.615608 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.169439 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.171252 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.174834 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-fll2j" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.174967 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.176523 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.176572 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.176600 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.182559 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.288581 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73dd3869-2591-41f2-8164-004d29e14e44-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.288803 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73dd3869-2591-41f2-8164-004d29e14e44-config\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.288867 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.288952 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/73dd3869-2591-41f2-8164-004d29e14e44-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.289032 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.289066 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.289179 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.289303 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv9xs\" (UniqueName: \"kubernetes.io/projected/73dd3869-2591-41f2-8164-004d29e14e44-kube-api-access-rv9xs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.390800 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.390887 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.390940 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv9xs\" (UniqueName: \"kubernetes.io/projected/73dd3869-2591-41f2-8164-004d29e14e44-kube-api-access-rv9xs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.391001 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73dd3869-2591-41f2-8164-004d29e14e44-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " 
pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.391042 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73dd3869-2591-41f2-8164-004d29e14e44-config\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.391059 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.391084 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/73dd3869-2591-41f2-8164-004d29e14e44-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.391109 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.391322 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.391957 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/73dd3869-2591-41f2-8164-004d29e14e44-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.392210 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73dd3869-2591-41f2-8164-004d29e14e44-config\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.392684 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73dd3869-2591-41f2-8164-004d29e14e44-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.395759 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.397197 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-ovsdbserver-nb-tls-certs\") pod 
\"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.398254 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73dd3869-2591-41f2-8164-004d29e14e44-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.412746 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.417109 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv9xs\" (UniqueName: \"kubernetes.io/projected/73dd3869-2591-41f2-8164-004d29e14e44-kube-api-access-rv9xs\") pod \"ovsdbserver-nb-0\" (UID: \"73dd3869-2591-41f2-8164-004d29e14e44\") " pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:36 crc kubenswrapper[4800]: I1125 15:36:36.494835 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 15:36:37 crc kubenswrapper[4800]: W1125 15:36:37.493033 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6fe0af4_a236_4346_a806_8601ecaa33b6.slice/crio-c0c60b84560c9a890fc9109747825650e35d6fa8368ed6d4eff156371d1691cb WatchSource:0}: Error finding container c0c60b84560c9a890fc9109747825650e35d6fa8368ed6d4eff156371d1691cb: Status 404 returned error can't find the container with id c0c60b84560c9a890fc9109747825650e35d6fa8368ed6d4eff156371d1691cb Nov 25 15:36:37 crc kubenswrapper[4800]: I1125 15:36:37.523455 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0028cc49-034e-4ff3-99c1-7c13bb298646","Type":"ContainerStarted","Data":"c6fc2596a137416d8f9a58aab759283faa7327d38a1cd6d58f8e67e460258f3d"} Nov 25 15:36:37 crc kubenswrapper[4800]: I1125 15:36:37.525405 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f6fe0af4-a236-4346-a806-8601ecaa33b6","Type":"ContainerStarted","Data":"c0c60b84560c9a890fc9109747825650e35d6fa8368ed6d4eff156371d1691cb"} Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.344225 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.346204 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.350340 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.350619 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.351157 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.352087 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-r8d7h" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.358451 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.447817 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.447915 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh85v\" (UniqueName: \"kubernetes.io/projected/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-kube-api-access-bh85v\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.447956 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.447993 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.448024 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.448048 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.448080 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-config\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " 
pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.448149 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.549985 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.550075 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.550115 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.550137 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.550160 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-config\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.550214 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.550305 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.550327 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh85v\" (UniqueName: \"kubernetes.io/projected/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-kube-api-access-bh85v\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.551019 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.551403 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.551683 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-config\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.552580 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.562738 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.565239 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.566377 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.568775 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh85v\" (UniqueName: \"kubernetes.io/projected/9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c-kube-api-access-bh85v\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.576357 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c\") " pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:39 crc kubenswrapper[4800]: I1125 15:36:39.709147 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 15:36:47 crc kubenswrapper[4800]: E1125 15:36:47.902161 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b" Nov 25 15:36:47 crc kubenswrapper[4800]: E1125 15:36:47.903270 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l2jtc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(72be41d8-6678-467c-a4d5-c4340e488c1b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:36:47 crc kubenswrapper[4800]: E1125 15:36:47.905089 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with 
ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" Nov 25 15:36:47 crc kubenswrapper[4800]: E1125 15:36:47.962224 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b" Nov 25 15:36:47 crc kubenswrapper[4800]: E1125 15:36:47.962486 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zk8kh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(46b2c800-efef-4668-9a57-c66ff504e0db): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:36:47 crc kubenswrapper[4800]: E1125 15:36:47.963726 4800 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" Nov 25 15:36:48 crc kubenswrapper[4800]: E1125 15:36:48.630695 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b\\\"\"" pod="openstack/rabbitmq-server-0" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" Nov 25 15:36:48 crc kubenswrapper[4800]: E1125 15:36:48.631341 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" Nov 25 15:36:53 crc kubenswrapper[4800]: E1125 15:36:53.600874 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce" Nov 25 15:36:53 crc kubenswrapper[4800]: E1125 15:36:53.601834 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sbf6q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(6b4060af-fd4c-49d5-980e-a496a2fcfbd5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:36:53 crc kubenswrapper[4800]: E1125 15:36:53.603109 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="6b4060af-fd4c-49d5-980e-a496a2fcfbd5" Nov 25 15:36:53 crc kubenswrapper[4800]: E1125 15:36:53.671928 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce\\\"\"" pod="openstack/openstack-galera-0" podUID="6b4060af-fd4c-49d5-980e-a496a2fcfbd5" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.499808 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.500661 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq 
--interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5xw5x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6584b49599-r59k5_openstack(1ba88f3b-e0dc-431a-adef-06d261c9f1b0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.501925 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6584b49599-r59k5" podUID="1ba88f3b-e0dc-431a-adef-06d261c9f1b0" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.510956 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.511203 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w7xht,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7c6d9948dc-n8mdf_openstack(d7d75985-cb77-46fc-ab4c-59a81c36cd4c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.512483 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf" podUID="d7d75985-cb77-46fc-ab4c-59a81c36cd4c" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.531198 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.531479 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l26kx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6486446b9f-q77d4_openstack(cf387b8c-24e2-43e5-a1c7-65b876b98b8d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.532875 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6486446b9f-q77d4" podUID="cf387b8c-24e2-43e5-a1c7-65b876b98b8d" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.549730 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.550048 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-psxcq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bdd77c89-86dkp_openstack(2b052479-1e8c-4559-93a8-8b542485b092): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.551313 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bdd77c89-86dkp" podUID="2b052479-1e8c-4559-93a8-8b542485b092" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.677445 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba\\\"\"" pod="openstack/dnsmasq-dns-6486446b9f-q77d4" podUID="cf387b8c-24e2-43e5-a1c7-65b876b98b8d" Nov 25 15:36:54 crc kubenswrapper[4800]: E1125 15:36:54.678485 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba\\\"\"" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf" podUID="d7d75985-cb77-46fc-ab4c-59a81c36cd4c" Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.691057 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-r59k5" event={"ID":"1ba88f3b-e0dc-431a-adef-06d261c9f1b0","Type":"ContainerDied","Data":"33233d57f31afa3bb9c74b6c62e06f38aaa9fd57e0f3ace08b5c6ad4cce43a2e"} Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.691876 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33233d57f31afa3bb9c74b6c62e06f38aaa9fd57e0f3ace08b5c6ad4cce43a2e" Nov 25 15:36:55 crc kubenswrapper[4800]: 
I1125 15:36:55.691901 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-86dkp" event={"ID":"2b052479-1e8c-4559-93a8-8b542485b092","Type":"ContainerDied","Data":"42a0b5f6132580f06f46cd50e78a52d27b61a211c016c2796361d755f8412a86"} Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.691918 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42a0b5f6132580f06f46cd50e78a52d27b61a211c016c2796361d755f8412a86" Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.782720 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-r59k5" Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.885009 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-86dkp" Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.949577 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xw5x\" (UniqueName: \"kubernetes.io/projected/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-kube-api-access-5xw5x\") pod \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.949819 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-dns-svc\") pod \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.949928 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-config\") pod \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\" (UID: \"1ba88f3b-e0dc-431a-adef-06d261c9f1b0\") " Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.950714 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-config" (OuterVolumeSpecName: "config") pod "1ba88f3b-e0dc-431a-adef-06d261c9f1b0" (UID: "1ba88f3b-e0dc-431a-adef-06d261c9f1b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.953767 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1ba88f3b-e0dc-431a-adef-06d261c9f1b0" (UID: "1ba88f3b-e0dc-431a-adef-06d261c9f1b0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.958879 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-kube-api-access-5xw5x" (OuterVolumeSpecName: "kube-api-access-5xw5x") pod "1ba88f3b-e0dc-431a-adef-06d261c9f1b0" (UID: "1ba88f3b-e0dc-431a-adef-06d261c9f1b0"). InnerVolumeSpecName "kube-api-access-5xw5x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:36:55 crc kubenswrapper[4800]: I1125 15:36:55.984661 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:36:55 crc kubenswrapper[4800]: W1125 15:36:55.989292 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65654af1_0a54_4d42_b45b_bae47243b055.slice/crio-b027011e502f7e82f252aceffb505d6b560cb690e6618bfc10142ad74dd1c911 WatchSource:0}: Error finding container b027011e502f7e82f252aceffb505d6b560cb690e6618bfc10142ad74dd1c911: Status 404 returned error can't find the container with id b027011e502f7e82f252aceffb505d6b560cb690e6618bfc10142ad74dd1c911 Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.051140 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psxcq\" (UniqueName: \"kubernetes.io/projected/2b052479-1e8c-4559-93a8-8b542485b092-kube-api-access-psxcq\") pod \"2b052479-1e8c-4559-93a8-8b542485b092\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.051684 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b052479-1e8c-4559-93a8-8b542485b092-config\") pod \"2b052479-1e8c-4559-93a8-8b542485b092\" (UID: \"2b052479-1e8c-4559-93a8-8b542485b092\") " Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.052217 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b052479-1e8c-4559-93a8-8b542485b092-config" (OuterVolumeSpecName: "config") pod "2b052479-1e8c-4559-93a8-8b542485b092" (UID: "2b052479-1e8c-4559-93a8-8b542485b092"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.052808 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.052866 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xw5x\" (UniqueName: \"kubernetes.io/projected/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-kube-api-access-5xw5x\") on node \"crc\" DevicePath \"\"" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.052880 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ba88f3b-e0dc-431a-adef-06d261c9f1b0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.052891 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b052479-1e8c-4559-93a8-8b542485b092-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.058029 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b052479-1e8c-4559-93a8-8b542485b092-kube-api-access-psxcq" (OuterVolumeSpecName: "kube-api-access-psxcq") pod "2b052479-1e8c-4559-93a8-8b542485b092" (UID: "2b052479-1e8c-4559-93a8-8b542485b092"). InnerVolumeSpecName "kube-api-access-psxcq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.103305 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jmbtv"] Nov 25 15:36:56 crc kubenswrapper[4800]: W1125 15:36:56.134318 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ead9ef3_a389_45a2_a1be_0b1d07116fde.slice/crio-896143983fffcc714a19b81d7c080f1be02e8fde28db509d04a06094dba36a45 WatchSource:0}: Error finding container 896143983fffcc714a19b81d7c080f1be02e8fde28db509d04a06094dba36a45: Status 404 returned error can't find the container with id 896143983fffcc714a19b81d7c080f1be02e8fde28db509d04a06094dba36a45 Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.140962 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-tklmv"] Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.154273 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psxcq\" (UniqueName: \"kubernetes.io/projected/2b052479-1e8c-4559-93a8-8b542485b092-kube-api-access-psxcq\") on node \"crc\" DevicePath \"\"" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.216611 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 15:36:56 crc kubenswrapper[4800]: W1125 15:36:56.217657 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d7ac5c2_e9e5_4f6f_b992_0e752f34a33c.slice/crio-3629c1c42c29cab51aa9ac266f3d2d1665208210152d0c05ecb4d476540b24fe WatchSource:0}: Error finding container 3629c1c42c29cab51aa9ac266f3d2d1665208210152d0c05ecb4d476540b24fe: Status 404 returned error can't find the container with id 3629c1c42c29cab51aa9ac266f3d2d1665208210152d0c05ecb4d476540b24fe Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.701590 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jmbtv" event={"ID":"f0140b9d-bed7-44ae-a1d5-8e0acdb70742","Type":"ContainerStarted","Data":"f480e379ddb3c9dfa1d52a3424fbdcebc30dd8dc5ea865896ab6e842bea49ed7"} Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.703368 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tklmv" event={"ID":"1ead9ef3-a389-45a2-a1be-0b1d07116fde","Type":"ContainerStarted","Data":"896143983fffcc714a19b81d7c080f1be02e8fde28db509d04a06094dba36a45"} Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.705462 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0028cc49-034e-4ff3-99c1-7c13bb298646","Type":"ContainerStarted","Data":"8fd1acf704d90cc7969d107b5f34b6bf852d6a93252f2f5c3c6ff9a12e9b4f53"} Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.708050 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f6fe0af4-a236-4346-a806-8601ecaa33b6","Type":"ContainerStarted","Data":"f96ce19bdc347103bbff3373a8a996a3bf7a61bad58d988c782b4a83b17825e7"} Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.708238 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.709995 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c","Type":"ContainerStarted","Data":"3629c1c42c29cab51aa9ac266f3d2d1665208210152d0c05ecb4d476540b24fe"} Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.712174 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-r59k5" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.712510 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"65654af1-0a54-4d42-b45b-bae47243b055","Type":"ContainerStarted","Data":"b027011e502f7e82f252aceffb505d6b560cb690e6618bfc10142ad74dd1c911"} Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.712574 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-86dkp" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.752287 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=9.702028766 podStartE2EDuration="27.752260164s" podCreationTimestamp="2025-11-25 15:36:29 +0000 UTC" firstStartedPulling="2025-11-25 15:36:37.497309982 +0000 UTC m=+1158.551718464" lastFinishedPulling="2025-11-25 15:36:55.54754138 +0000 UTC m=+1176.601949862" observedRunningTime="2025-11-25 15:36:56.748386557 +0000 UTC m=+1177.802795059" watchObservedRunningTime="2025-11-25 15:36:56.752260164 +0000 UTC m=+1177.806668646" Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.852573 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-86dkp"] Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.874653 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-86dkp"] Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.909346 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-r59k5"] Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.922782 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-r59k5"] Nov 25 15:36:56 crc kubenswrapper[4800]: I1125 15:36:56.932575 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 15:36:57 crc kubenswrapper[4800]: W1125 15:36:57.099161 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73dd3869_2591_41f2_8164_004d29e14e44.slice/crio-188ef3a071611071c6f7f18b11d41aafff5b5ccd6412b01b891217b474d81ce0 WatchSource:0}: Error finding container 188ef3a071611071c6f7f18b11d41aafff5b5ccd6412b01b891217b474d81ce0: Status 404 returned error can't find the container with id 188ef3a071611071c6f7f18b11d41aafff5b5ccd6412b01b891217b474d81ce0 Nov 25 15:36:57 crc kubenswrapper[4800]: I1125 15:36:57.722364 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"73dd3869-2591-41f2-8164-004d29e14e44","Type":"ContainerStarted","Data":"188ef3a071611071c6f7f18b11d41aafff5b5ccd6412b01b891217b474d81ce0"} Nov 25 15:36:57 crc kubenswrapper[4800]: I1125 15:36:57.796374 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ba88f3b-e0dc-431a-adef-06d261c9f1b0" path="/var/lib/kubelet/pods/1ba88f3b-e0dc-431a-adef-06d261c9f1b0/volumes" Nov 25 15:36:57 crc kubenswrapper[4800]: I1125 15:36:57.796812 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b052479-1e8c-4559-93a8-8b542485b092" 
path="/var/lib/kubelet/pods/2b052479-1e8c-4559-93a8-8b542485b092/volumes" Nov 25 15:36:59 crc kubenswrapper[4800]: I1125 15:36:59.743386 4800 generic.go:334] "Generic (PLEG): container finished" podID="0028cc49-034e-4ff3-99c1-7c13bb298646" containerID="8fd1acf704d90cc7969d107b5f34b6bf852d6a93252f2f5c3c6ff9a12e9b4f53" exitCode=0 Nov 25 15:36:59 crc kubenswrapper[4800]: I1125 15:36:59.743449 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0028cc49-034e-4ff3-99c1-7c13bb298646","Type":"ContainerDied","Data":"8fd1acf704d90cc7969d107b5f34b6bf852d6a93252f2f5c3c6ff9a12e9b4f53"} Nov 25 15:37:00 crc kubenswrapper[4800]: I1125 15:37:00.347494 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.806022 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c","Type":"ContainerStarted","Data":"337d50d0bb47cf39af009ec23d0ef36cb8dc7584e6a7ff32647a6a759f59be86"} Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.806951 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.806972 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"65654af1-0a54-4d42-b45b-bae47243b055","Type":"ContainerStarted","Data":"d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680"} Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.810116 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jmbtv" event={"ID":"f0140b9d-bed7-44ae-a1d5-8e0acdb70742","Type":"ContainerStarted","Data":"920f666c13f8c30bc91a973c4a0ecaa3611b60a607ce1093680d099cec999022"} Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.811192 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-jmbtv" Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.813937 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tklmv" event={"ID":"1ead9ef3-a389-45a2-a1be-0b1d07116fde","Type":"ContainerStarted","Data":"057d71e10e445c6076cdfac43b461933c3d06b24c8a61e4f5f634451b20306e8"} Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.825520 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0028cc49-034e-4ff3-99c1-7c13bb298646","Type":"ContainerStarted","Data":"d75974fe90e685536112d1539d408a04221ed54beeedeb5948f95c1249a01292"} Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.833430 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"73dd3869-2591-41f2-8164-004d29e14e44","Type":"ContainerStarted","Data":"db01f4c9e6fe04e25b28ff4d26c9f876960b5ce99c0dbfa83f52a61bc699e54f"} Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.838937 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=26.339737375 podStartE2EDuration="30.8389066s" podCreationTimestamp="2025-11-25 15:36:31 +0000 UTC" firstStartedPulling="2025-11-25 15:36:55.993780504 +0000 UTC m=+1177.048188986" lastFinishedPulling="2025-11-25 15:37:00.492949739 +0000 UTC m=+1181.547358211" observedRunningTime="2025-11-25 15:37:01.824190115 +0000 UTC m=+1182.878598617" watchObservedRunningTime="2025-11-25 
15:37:01.8389066 +0000 UTC m=+1182.893315092" Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.851331 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-jmbtv" podStartSLOduration=22.480713928 podStartE2EDuration="26.851302772s" podCreationTimestamp="2025-11-25 15:36:35 +0000 UTC" firstStartedPulling="2025-11-25 15:36:56.112637596 +0000 UTC m=+1177.167046078" lastFinishedPulling="2025-11-25 15:37:00.48322644 +0000 UTC m=+1181.537634922" observedRunningTime="2025-11-25 15:37:01.847162787 +0000 UTC m=+1182.901571289" watchObservedRunningTime="2025-11-25 15:37:01.851302772 +0000 UTC m=+1182.905711244" Nov 25 15:37:01 crc kubenswrapper[4800]: I1125 15:37:01.903951 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=15.892444011 podStartE2EDuration="33.903926261s" podCreationTimestamp="2025-11-25 15:36:28 +0000 UTC" firstStartedPulling="2025-11-25 15:36:37.495850143 +0000 UTC m=+1158.550258625" lastFinishedPulling="2025-11-25 15:36:55.507332393 +0000 UTC m=+1176.561740875" observedRunningTime="2025-11-25 15:37:01.897240576 +0000 UTC m=+1182.951649068" watchObservedRunningTime="2025-11-25 15:37:01.903926261 +0000 UTC m=+1182.958334743" Nov 25 15:37:02 crc kubenswrapper[4800]: I1125 15:37:02.844101 4800 generic.go:334] "Generic (PLEG): container finished" podID="1ead9ef3-a389-45a2-a1be-0b1d07116fde" containerID="057d71e10e445c6076cdfac43b461933c3d06b24c8a61e4f5f634451b20306e8" exitCode=0 Nov 25 15:37:02 crc kubenswrapper[4800]: I1125 15:37:02.844211 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tklmv" event={"ID":"1ead9ef3-a389-45a2-a1be-0b1d07116fde","Type":"ContainerDied","Data":"057d71e10e445c6076cdfac43b461933c3d06b24c8a61e4f5f634451b20306e8"} Nov 25 15:37:02 crc kubenswrapper[4800]: I1125 15:37:02.849017 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"72be41d8-6678-467c-a4d5-c4340e488c1b","Type":"ContainerStarted","Data":"6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca"} Nov 25 15:37:02 crc kubenswrapper[4800]: I1125 15:37:02.851389 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"46b2c800-efef-4668-9a57-c66ff504e0db","Type":"ContainerStarted","Data":"71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552"} Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.874077 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tklmv" event={"ID":"1ead9ef3-a389-45a2-a1be-0b1d07116fde","Type":"ContainerStarted","Data":"67b74b4ce0de6a2844a1bd621e9c7d7f42a73b84e0eec0ccda6c9bda4e42e510"} Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.875396 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tklmv" event={"ID":"1ead9ef3-a389-45a2-a1be-0b1d07116fde","Type":"ContainerStarted","Data":"1d0638a3ab7ac89d3949ca0a99a0d1ec31993d5ec2d2a724736eb71900469864"} Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.875433 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.875455 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.876706 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovsdbserver-nb-0" event={"ID":"73dd3869-2591-41f2-8164-004d29e14e44","Type":"ContainerStarted","Data":"62c87c61e2d59b4fc9c3c64372a180ac98dd8eaf7a64621a2c7c7052075dcefb"} Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.880053 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c","Type":"ContainerStarted","Data":"dadccc49cd59c7466da228e2a450ba0b71a9dde919f3f9d1df6ba37069469741"} Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.904730 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-tklmv" podStartSLOduration=25.664761767999998 podStartE2EDuration="29.904697166s" podCreationTimestamp="2025-11-25 15:36:35 +0000 UTC" firstStartedPulling="2025-11-25 15:36:56.137630554 +0000 UTC m=+1177.192039056" lastFinishedPulling="2025-11-25 15:37:00.377565982 +0000 UTC m=+1181.431974454" observedRunningTime="2025-11-25 15:37:04.902596658 +0000 UTC m=+1185.957005180" watchObservedRunningTime="2025-11-25 15:37:04.904697166 +0000 UTC m=+1185.959105668" Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.937682 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=22.770536465 podStartE2EDuration="29.937663214s" podCreationTimestamp="2025-11-25 15:36:35 +0000 UTC" firstStartedPulling="2025-11-25 15:36:57.101636241 +0000 UTC m=+1178.156044723" lastFinishedPulling="2025-11-25 15:37:04.26876299 +0000 UTC m=+1185.323171472" observedRunningTime="2025-11-25 15:37:04.93136204 +0000 UTC m=+1185.985770522" watchObservedRunningTime="2025-11-25 15:37:04.937663214 +0000 UTC m=+1185.992071706" Nov 25 15:37:04 crc kubenswrapper[4800]: I1125 15:37:04.965330 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=18.897541862 podStartE2EDuration="26.965303364s" podCreationTimestamp="2025-11-25 15:36:38 +0000 UTC" firstStartedPulling="2025-11-25 15:36:56.219981691 +0000 UTC m=+1177.274390173" lastFinishedPulling="2025-11-25 15:37:04.287743193 +0000 UTC m=+1185.342151675" observedRunningTime="2025-11-25 15:37:04.957542091 +0000 UTC m=+1186.011950583" watchObservedRunningTime="2025-11-25 15:37:04.965303364 +0000 UTC m=+1186.019711866" Nov 25 15:37:06 crc kubenswrapper[4800]: I1125 15:37:06.495277 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 15:37:06 crc kubenswrapper[4800]: I1125 15:37:06.495709 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 15:37:06 crc kubenswrapper[4800]: I1125 15:37:06.551004 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 15:37:06 crc kubenswrapper[4800]: I1125 15:37:06.709828 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 15:37:06 crc kubenswrapper[4800]: I1125 15:37:06.758624 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 15:37:06 crc kubenswrapper[4800]: I1125 15:37:06.896614 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 15:37:06 crc kubenswrapper[4800]: I1125 15:37:06.953951 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 15:37:06 crc 
kubenswrapper[4800]: I1125 15:37:06.960632 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.167522 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-q77d4"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.231446 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-tx4tn"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.233129 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.237927 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-7jcg4"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.238486 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.239737 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.242865 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.266001 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-tx4tn"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.283925 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-7jcg4"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.329467 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-dns-svc\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.329533 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-ovsdbserver-sb\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.329564 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-config\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.329993 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsqnk\" (UniqueName: \"kubernetes.io/projected/a2a07cf0-e668-43c3-bc9a-8594243f1d02-kube-api-access-fsqnk\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.409709 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-n8mdf"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.444600 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-dns-svc\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.444689 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-ovsdbserver-sb\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.444754 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-ovs-rundir\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.444780 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-config\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.444964 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-combined-ca-bundle\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.445550 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-config\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.445927 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btcqb\" (UniqueName: \"kubernetes.io/projected/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-kube-api-access-btcqb\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.445956 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsqnk\" (UniqueName: \"kubernetes.io/projected/a2a07cf0-e668-43c3-bc9a-8594243f1d02-kube-api-access-fsqnk\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.447783 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-config\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.448390 4800 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-dns-svc\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.448483 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.448537 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-ovn-rundir\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.458324 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-ovsdbserver-sb\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.476613 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-5rdjt"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.490812 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.510994 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.522136 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsqnk\" (UniqueName: \"kubernetes.io/projected/a2a07cf0-e668-43c3-bc9a-8594243f1d02-kube-api-access-fsqnk\") pod \"dnsmasq-dns-5848494dd9-tx4tn\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.546529 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-5rdjt"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.552875 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-ovs-rundir\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.552445 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-ovs-rundir\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.553010 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-combined-ca-bundle\") pod 
\"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.553034 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-config\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.553077 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btcqb\" (UniqueName: \"kubernetes.io/projected/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-kube-api-access-btcqb\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.553106 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.553155 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-ovn-rundir\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.553258 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-ovn-rundir\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.554385 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-config\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.559799 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-combined-ca-bundle\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.560401 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.570804 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.573006 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.578688 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btcqb\" (UniqueName: \"kubernetes.io/projected/b7276fee-e7c9-4661-bc71-c1d2a4d4593e-kube-api-access-btcqb\") pod \"ovn-controller-metrics-7jcg4\" (UID: \"b7276fee-e7c9-4661-bc71-c1d2a4d4593e\") " pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.581170 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.584832 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.585194 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.586079 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.586262 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.586440 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-sdlt2" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.601581 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-7jcg4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.655067 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6kbd\" (UniqueName: \"kubernetes.io/projected/84bf7ad1-699f-4ba0-a3ce-75e46a590646-kube-api-access-q6kbd\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.655574 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.655630 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-config\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.655670 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.655702 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.729262 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-q77d4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.758306 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-config\") pod \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.758496 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-dns-svc\") pod \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.758566 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l26kx\" (UniqueName: \"kubernetes.io/projected/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-kube-api-access-l26kx\") pod \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\" (UID: \"cf387b8c-24e2-43e5-a1c7-65b876b98b8d\") " Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759018 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759085 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759116 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-config\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759165 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759205 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-config\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759253 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: 
\"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759282 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759307 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-scripts\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759339 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759366 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkc8q\" (UniqueName: \"kubernetes.io/projected/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-kube-api-access-tkc8q\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759392 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6kbd\" (UniqueName: \"kubernetes.io/projected/84bf7ad1-699f-4ba0-a3ce-75e46a590646-kube-api-access-q6kbd\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759428 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.759832 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-config" (OuterVolumeSpecName: "config") pod "cf387b8c-24e2-43e5-a1c7-65b876b98b8d" (UID: "cf387b8c-24e2-43e5-a1c7-65b876b98b8d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.760745 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.760886 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.761371 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.761965 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-config\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.762857 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cf387b8c-24e2-43e5-a1c7-65b876b98b8d" (UID: "cf387b8c-24e2-43e5-a1c7-65b876b98b8d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.765513 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-kube-api-access-l26kx" (OuterVolumeSpecName: "kube-api-access-l26kx") pod "cf387b8c-24e2-43e5-a1c7-65b876b98b8d" (UID: "cf387b8c-24e2-43e5-a1c7-65b876b98b8d"). InnerVolumeSpecName "kube-api-access-l26kx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.780829 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6kbd\" (UniqueName: \"kubernetes.io/projected/84bf7ad1-699f-4ba0-a3ce-75e46a590646-kube-api-access-q6kbd\") pod \"dnsmasq-dns-5c7b6b5695-5rdjt\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861177 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861236 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-scripts\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861282 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkc8q\" (UniqueName: \"kubernetes.io/projected/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-kube-api-access-tkc8q\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861313 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861361 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861404 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-config\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861461 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861540 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861555 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l26kx\" (UniqueName: \"kubernetes.io/projected/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-kube-api-access-l26kx\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.861570 
4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf387b8c-24e2-43e5-a1c7-65b876b98b8d-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.862518 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.862902 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-scripts\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.863358 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-config\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.868936 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.869097 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.869817 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.897781 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkc8q\" (UniqueName: \"kubernetes.io/projected/2ba620e1-8d84-4ce2-acca-5d0b2df703d0-kube-api-access-tkc8q\") pod \"ovn-northd-0\" (UID: \"2ba620e1-8d84-4ce2-acca-5d0b2df703d0\") " pod="openstack/ovn-northd-0" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.918227 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-q77d4" event={"ID":"cf387b8c-24e2-43e5-a1c7-65b876b98b8d","Type":"ContainerDied","Data":"2fedeeabf5c32cc27179885e25e6a82a871748660b946d765ec79fb5e4b63d6d"} Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.918465 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-q77d4" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.924745 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.932990 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf" event={"ID":"d7d75985-cb77-46fc-ab4c-59a81c36cd4c","Type":"ContainerDied","Data":"02ff8db2c1c500c1b999fd015a6ada3fd2c0bac029a0696ed7b9ba8f793312e9"} Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.933059 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02ff8db2c1c500c1b999fd015a6ada3fd2c0bac029a0696ed7b9ba8f793312e9" Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.941180 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6b4060af-fd4c-49d5-980e-a496a2fcfbd5","Type":"ContainerStarted","Data":"aeb53d5a7b7566efa1a7688a73cf3972bae796ecb8f32f06861874992bb1b47d"} Nov 25 15:37:07 crc kubenswrapper[4800]: I1125 15:37:07.951365 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.060146 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.157512 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-q77d4"] Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.171684 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-dns-svc\") pod \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.172458 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7xht\" (UniqueName: \"kubernetes.io/projected/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-kube-api-access-w7xht\") pod \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.172538 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d7d75985-cb77-46fc-ab4c-59a81c36cd4c" (UID: "d7d75985-cb77-46fc-ab4c-59a81c36cd4c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.172609 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-config\") pod \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\" (UID: \"d7d75985-cb77-46fc-ab4c-59a81c36cd4c\") " Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.173773 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-config" (OuterVolumeSpecName: "config") pod "d7d75985-cb77-46fc-ab4c-59a81c36cd4c" (UID: "d7d75985-cb77-46fc-ab4c-59a81c36cd4c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.179870 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.179989 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.180282 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-kube-api-access-w7xht" (OuterVolumeSpecName: "kube-api-access-w7xht") pod "d7d75985-cb77-46fc-ab4c-59a81c36cd4c" (UID: "d7d75985-cb77-46fc-ab4c-59a81c36cd4c"). InnerVolumeSpecName "kube-api-access-w7xht". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.190731 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-q77d4"] Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.212976 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-tx4tn"] Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.281937 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7xht\" (UniqueName: \"kubernetes.io/projected/d7d75985-cb77-46fc-ab4c-59a81c36cd4c-kube-api-access-w7xht\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.410776 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-7jcg4"] Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.541377 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.550074 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-5rdjt"] Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.952741 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" event={"ID":"a2a07cf0-e668-43c3-bc9a-8594243f1d02","Type":"ContainerStarted","Data":"2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58"} Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.956078 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-7jcg4" event={"ID":"b7276fee-e7c9-4661-bc71-c1d2a4d4593e","Type":"ContainerStarted","Data":"62925793c22350101b6a429c6c551b01084861cd8a99f4a105031e3d7ee65bb7"} Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.956121 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-7jcg4" event={"ID":"b7276fee-e7c9-4661-bc71-c1d2a4d4593e","Type":"ContainerStarted","Data":"5feb75543bb6db1867807edd74573a8d95e3840a1f6110523b5a71c57ab7fcda"} Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.970313 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" event={"ID":"84bf7ad1-699f-4ba0-a3ce-75e46a590646","Type":"ContainerStarted","Data":"3a8e7fcdf30c45b29e78997c185b555c939e658e33b86460e101b82b1c489fbe"} Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.974756 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"2ba620e1-8d84-4ce2-acca-5d0b2df703d0","Type":"ContainerStarted","Data":"4cdbb64ccddc49d64fd76b9c6898503535bcd7f7703c492892f110f88de0d44e"} Nov 25 15:37:08 crc kubenswrapper[4800]: I1125 15:37:08.974951 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-n8mdf" Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.000091 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-7jcg4" podStartSLOduration=2.000064895 podStartE2EDuration="2.000064895s" podCreationTimestamp="2025-11-25 15:37:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:37:08.997417562 +0000 UTC m=+1190.051826044" watchObservedRunningTime="2025-11-25 15:37:09.000064895 +0000 UTC m=+1190.054473387" Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.232220 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-n8mdf"] Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.243169 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-n8mdf"] Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.803698 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf387b8c-24e2-43e5-a1c7-65b876b98b8d" path="/var/lib/kubelet/pods/cf387b8c-24e2-43e5-a1c7-65b876b98b8d/volumes" Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.804302 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7d75985-cb77-46fc-ab4c-59a81c36cd4c" path="/var/lib/kubelet/pods/d7d75985-cb77-46fc-ab4c-59a81c36cd4c/volumes" Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.985345 4800 generic.go:334] "Generic (PLEG): container finished" podID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerID="77be9ba1326915a0807b0ff1ceef231623abab09e9a0c5db006a92c2a0c2308e" exitCode=0 Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.985508 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" event={"ID":"84bf7ad1-699f-4ba0-a3ce-75e46a590646","Type":"ContainerDied","Data":"77be9ba1326915a0807b0ff1ceef231623abab09e9a0c5db006a92c2a0c2308e"} Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.989362 4800 generic.go:334] "Generic (PLEG): container finished" podID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerID="27e0fdc65dcd334eaeb0738129198eed8d9974e1082689dff75e407ed94c90c1" exitCode=0 Nov 25 15:37:09 crc kubenswrapper[4800]: I1125 15:37:09.989428 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" event={"ID":"a2a07cf0-e668-43c3-bc9a-8594243f1d02","Type":"ContainerDied","Data":"27e0fdc65dcd334eaeb0738129198eed8d9974e1082689dff75e407ed94c90c1"} Nov 25 15:37:10 crc kubenswrapper[4800]: I1125 15:37:10.507901 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 15:37:10 crc kubenswrapper[4800]: I1125 15:37:10.508383 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 15:37:10 crc kubenswrapper[4800]: I1125 15:37:10.594162 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.000993 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" event={"ID":"84bf7ad1-699f-4ba0-a3ce-75e46a590646","Type":"ContainerStarted","Data":"3198b91d6c6eb6bcef3240f08a53bfda03ec14319b9627db88bc4570be9b2ad6"} Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.001660 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.004232 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2ba620e1-8d84-4ce2-acca-5d0b2df703d0","Type":"ContainerStarted","Data":"35278b2c3a00eb3e540a41bbb9225d9332c05f2e1ccf611aed5d27d2f810fbcd"} Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.004435 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.006968 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" event={"ID":"a2a07cf0-e668-43c3-bc9a-8594243f1d02","Type":"ContainerStarted","Data":"e5417488c041b5d6fc6bf022eb82f8d1134145d6e160f9a7c16274c8451ef508"} Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.007417 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.052822 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" podStartSLOduration=3.587282067 podStartE2EDuration="4.052799622s" podCreationTimestamp="2025-11-25 15:37:07 +0000 UTC" firstStartedPulling="2025-11-25 15:37:08.558994242 +0000 UTC m=+1189.613402734" lastFinishedPulling="2025-11-25 15:37:09.024511807 +0000 UTC m=+1190.078920289" observedRunningTime="2025-11-25 15:37:11.045733738 +0000 UTC m=+1192.100142230" watchObservedRunningTime="2025-11-25 15:37:11.052799622 +0000 UTC m=+1192.107208104" Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.069424 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" podStartSLOduration=3.34942909 podStartE2EDuration="4.069401719s" podCreationTimestamp="2025-11-25 15:37:07 +0000 UTC" firstStartedPulling="2025-11-25 15:37:08.165536422 +0000 UTC m=+1189.219944904" lastFinishedPulling="2025-11-25 15:37:08.885509041 +0000 UTC m=+1189.939917533" observedRunningTime="2025-11-25 15:37:11.067237981 +0000 UTC m=+1192.121646483" watchObservedRunningTime="2025-11-25 15:37:11.069401719 +0000 UTC m=+1192.123810201" Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.087900 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.088744075 podStartE2EDuration="4.087881719s" podCreationTimestamp="2025-11-25 15:37:07 +0000 UTC" firstStartedPulling="2025-11-25 15:37:08.559030603 +0000 UTC m=+1189.613439085" lastFinishedPulling="2025-11-25 15:37:10.558168247 +0000 UTC m=+1191.612576729" observedRunningTime="2025-11-25 15:37:11.087293313 +0000 UTC m=+1192.141701785" watchObservedRunningTime="2025-11-25 15:37:11.087881719 +0000 UTC m=+1192.142290201" Nov 25 15:37:11 crc kubenswrapper[4800]: I1125 15:37:11.124826 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 15:37:12 crc kubenswrapper[4800]: I1125 15:37:12.019247 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"2ba620e1-8d84-4ce2-acca-5d0b2df703d0","Type":"ContainerStarted","Data":"80422a21e07eaf7ea4810fc43460bcbc55c2105fa3b214a32f933b6dfec7ac51"} Nov 25 15:37:12 crc kubenswrapper[4800]: I1125 15:37:12.022025 4800 generic.go:334] "Generic (PLEG): container finished" podID="6b4060af-fd4c-49d5-980e-a496a2fcfbd5" containerID="aeb53d5a7b7566efa1a7688a73cf3972bae796ecb8f32f06861874992bb1b47d" exitCode=0 Nov 25 15:37:12 crc kubenswrapper[4800]: I1125 15:37:12.022143 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6b4060af-fd4c-49d5-980e-a496a2fcfbd5","Type":"ContainerDied","Data":"aeb53d5a7b7566efa1a7688a73cf3972bae796ecb8f32f06861874992bb1b47d"} Nov 25 15:37:12 crc kubenswrapper[4800]: I1125 15:37:12.264258 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 15:37:12 crc kubenswrapper[4800]: I1125 15:37:12.640715 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:37:12 crc kubenswrapper[4800]: I1125 15:37:12.640801 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:37:13 crc kubenswrapper[4800]: I1125 15:37:13.046583 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6b4060af-fd4c-49d5-980e-a496a2fcfbd5","Type":"ContainerStarted","Data":"6e8d3f4a7f0be03681f5145a468a03656d7fdbb68a2046d6f5fde3a182524591"} Nov 25 15:37:13 crc kubenswrapper[4800]: I1125 15:37:13.084136 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371990.770662 podStartE2EDuration="46.084114192s" podCreationTimestamp="2025-11-25 15:36:27 +0000 UTC" firstStartedPulling="2025-11-25 15:36:29.716368047 +0000 UTC m=+1150.770776529" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:37:13.074204788 +0000 UTC m=+1194.128613280" watchObservedRunningTime="2025-11-25 15:37:13.084114192 +0000 UTC m=+1194.138522674" Nov 25 15:37:17 crc kubenswrapper[4800]: I1125 15:37:17.587273 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:17 crc kubenswrapper[4800]: I1125 15:37:17.927055 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:37:17 crc kubenswrapper[4800]: I1125 15:37:17.988763 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-tx4tn"] Nov 25 15:37:18 crc kubenswrapper[4800]: I1125 15:37:18.093118 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" podUID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerName="dnsmasq-dns" containerID="cri-o://e5417488c041b5d6fc6bf022eb82f8d1134145d6e160f9a7c16274c8451ef508" gracePeriod=10 Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.070205 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.070714 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.104365 4800 generic.go:334] "Generic (PLEG): container finished" podID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerID="e5417488c041b5d6fc6bf022eb82f8d1134145d6e160f9a7c16274c8451ef508" exitCode=0 Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.104433 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" event={"ID":"a2a07cf0-e668-43c3-bc9a-8594243f1d02","Type":"ContainerDied","Data":"e5417488c041b5d6fc6bf022eb82f8d1134145d6e160f9a7c16274c8451ef508"} Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.104474 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" event={"ID":"a2a07cf0-e668-43c3-bc9a-8594243f1d02","Type":"ContainerDied","Data":"2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58"} Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.104494 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.127383 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.246712 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-dns-svc\") pod \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.247777 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-ovsdbserver-sb\") pod \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.247871 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-config\") pod \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.248018 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsqnk\" (UniqueName: \"kubernetes.io/projected/a2a07cf0-e668-43c3-bc9a-8594243f1d02-kube-api-access-fsqnk\") pod \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\" (UID: \"a2a07cf0-e668-43c3-bc9a-8594243f1d02\") " Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.254529 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2a07cf0-e668-43c3-bc9a-8594243f1d02-kube-api-access-fsqnk" (OuterVolumeSpecName: "kube-api-access-fsqnk") pod "a2a07cf0-e668-43c3-bc9a-8594243f1d02" (UID: "a2a07cf0-e668-43c3-bc9a-8594243f1d02"). InnerVolumeSpecName "kube-api-access-fsqnk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.293823 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-config" (OuterVolumeSpecName: "config") pod "a2a07cf0-e668-43c3-bc9a-8594243f1d02" (UID: "a2a07cf0-e668-43c3-bc9a-8594243f1d02"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.293867 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a2a07cf0-e668-43c3-bc9a-8594243f1d02" (UID: "a2a07cf0-e668-43c3-bc9a-8594243f1d02"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.299348 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a2a07cf0-e668-43c3-bc9a-8594243f1d02" (UID: "a2a07cf0-e668-43c3-bc9a-8594243f1d02"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.350908 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsqnk\" (UniqueName: \"kubernetes.io/projected/a2a07cf0-e668-43c3-bc9a-8594243f1d02-kube-api-access-fsqnk\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.350989 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.351067 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:19 crc kubenswrapper[4800]: I1125 15:37:19.351084 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2a07cf0-e668-43c3-bc9a-8594243f1d02-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:20 crc kubenswrapper[4800]: I1125 15:37:20.119467 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-tx4tn" Nov 25 15:37:20 crc kubenswrapper[4800]: I1125 15:37:20.156558 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-tx4tn"] Nov 25 15:37:20 crc kubenswrapper[4800]: I1125 15:37:20.162772 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-tx4tn"] Nov 25 15:37:21 crc kubenswrapper[4800]: I1125 15:37:21.795604 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" path="/var/lib/kubelet/pods/a2a07cf0-e668-43c3-bc9a-8594243f1d02/volumes" Nov 25 15:37:23 crc kubenswrapper[4800]: I1125 15:37:23.018088 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 15:37:23 crc kubenswrapper[4800]: I1125 15:37:23.873512 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 15:37:23 crc kubenswrapper[4800]: I1125 15:37:23.962010 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 15:37:27 crc kubenswrapper[4800]: E1125 15:37:27.334075 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2a07cf0_e668_43c3_bc9a_8594243f1d02.slice/crio-2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58\": RecentStats: unable to find data in memory cache]" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.080617 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-99n98"] Nov 25 15:37:30 crc kubenswrapper[4800]: E1125 15:37:30.081616 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerName="init" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.081633 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerName="init" Nov 25 15:37:30 crc kubenswrapper[4800]: E1125 15:37:30.081653 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerName="dnsmasq-dns" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.081661 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerName="dnsmasq-dns" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.081836 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2a07cf0-e668-43c3-bc9a-8594243f1d02" containerName="dnsmasq-dns" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.082575 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.089678 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-a0ce-account-create-b766g"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.091630 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.094039 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.099606 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-99n98"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.111402 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a0ce-account-create-b766g"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.254382 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-96nfb"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.255805 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.262326 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-96nfb"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.268564 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3699aa7f-d6e9-45ea-8988-51ab0811f43c-operator-scripts\") pod \"keystone-a0ce-account-create-b766g\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.268865 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/780bb63d-407c-41c6-8dc0-6e03a4b904fd-operator-scripts\") pod \"keystone-db-create-99n98\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.269008 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq862\" (UniqueName: \"kubernetes.io/projected/780bb63d-407c-41c6-8dc0-6e03a4b904fd-kube-api-access-vq862\") pod \"keystone-db-create-99n98\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.269051 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdqmt\" (UniqueName: \"kubernetes.io/projected/3699aa7f-d6e9-45ea-8988-51ab0811f43c-kube-api-access-mdqmt\") pod \"keystone-a0ce-account-create-b766g\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.364733 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-d636-account-create-6fnz8"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.366929 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.369206 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.370098 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0bbad3a-0877-42a9-9b3b-8102f399768d-operator-scripts\") pod \"placement-db-create-96nfb\" (UID: \"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.370308 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3699aa7f-d6e9-45ea-8988-51ab0811f43c-operator-scripts\") pod \"keystone-a0ce-account-create-b766g\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.370567 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/780bb63d-407c-41c6-8dc0-6e03a4b904fd-operator-scripts\") pod \"keystone-db-create-99n98\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.370751 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq862\" (UniqueName: \"kubernetes.io/projected/780bb63d-407c-41c6-8dc0-6e03a4b904fd-kube-api-access-vq862\") pod \"keystone-db-create-99n98\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.370853 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdqmt\" (UniqueName: \"kubernetes.io/projected/3699aa7f-d6e9-45ea-8988-51ab0811f43c-kube-api-access-mdqmt\") pod \"keystone-a0ce-account-create-b766g\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.371156 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzdr8\" (UniqueName: \"kubernetes.io/projected/a0bbad3a-0877-42a9-9b3b-8102f399768d-kube-api-access-bzdr8\") pod \"placement-db-create-96nfb\" (UID: \"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.371401 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3699aa7f-d6e9-45ea-8988-51ab0811f43c-operator-scripts\") pod \"keystone-a0ce-account-create-b766g\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.372520 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/780bb63d-407c-41c6-8dc0-6e03a4b904fd-operator-scripts\") pod \"keystone-db-create-99n98\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.377158 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/placement-d636-account-create-6fnz8"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.393181 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq862\" (UniqueName: \"kubernetes.io/projected/780bb63d-407c-41c6-8dc0-6e03a4b904fd-kube-api-access-vq862\") pod \"keystone-db-create-99n98\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.398761 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdqmt\" (UniqueName: \"kubernetes.io/projected/3699aa7f-d6e9-45ea-8988-51ab0811f43c-kube-api-access-mdqmt\") pod \"keystone-a0ce-account-create-b766g\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.420021 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-99n98" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.423516 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.472465 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzdr8\" (UniqueName: \"kubernetes.io/projected/a0bbad3a-0877-42a9-9b3b-8102f399768d-kube-api-access-bzdr8\") pod \"placement-db-create-96nfb\" (UID: \"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.472518 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpqff\" (UniqueName: \"kubernetes.io/projected/b4cb5047-76f0-42c5-91a3-24cf5274f77b-kube-api-access-jpqff\") pod \"placement-d636-account-create-6fnz8\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.472567 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0bbad3a-0877-42a9-9b3b-8102f399768d-operator-scripts\") pod \"placement-db-create-96nfb\" (UID: \"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.472624 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4cb5047-76f0-42c5-91a3-24cf5274f77b-operator-scripts\") pod \"placement-d636-account-create-6fnz8\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.473439 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0bbad3a-0877-42a9-9b3b-8102f399768d-operator-scripts\") pod \"placement-db-create-96nfb\" (UID: \"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.499414 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzdr8\" (UniqueName: \"kubernetes.io/projected/a0bbad3a-0877-42a9-9b3b-8102f399768d-kube-api-access-bzdr8\") pod \"placement-db-create-96nfb\" (UID: 
\"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.574497 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4cb5047-76f0-42c5-91a3-24cf5274f77b-operator-scripts\") pod \"placement-d636-account-create-6fnz8\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.574815 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpqff\" (UniqueName: \"kubernetes.io/projected/b4cb5047-76f0-42c5-91a3-24cf5274f77b-kube-api-access-jpqff\") pod \"placement-d636-account-create-6fnz8\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.576298 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4cb5047-76f0-42c5-91a3-24cf5274f77b-operator-scripts\") pod \"placement-d636-account-create-6fnz8\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.576322 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-96nfb" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.579876 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-w5nq9"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.581516 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.600520 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-w5nq9"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.602535 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpqff\" (UniqueName: \"kubernetes.io/projected/b4cb5047-76f0-42c5-91a3-24cf5274f77b-kube-api-access-jpqff\") pod \"placement-d636-account-create-6fnz8\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.679425 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-4503-account-create-mqjsr"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.680518 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.685313 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.690607 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4503-account-create-mqjsr"] Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.691058 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.786016 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ltc2\" (UniqueName: \"kubernetes.io/projected/c5212761-3a01-4a92-92bd-bb4f82a0d011-kube-api-access-2ltc2\") pod \"glance-db-create-w5nq9\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.786557 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5212761-3a01-4a92-92bd-bb4f82a0d011-operator-scripts\") pod \"glance-db-create-w5nq9\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.786655 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj4rn\" (UniqueName: \"kubernetes.io/projected/079c90b2-0054-4e36-8836-1e490ce9203c-kube-api-access-qj4rn\") pod \"glance-4503-account-create-mqjsr\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.786684 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079c90b2-0054-4e36-8836-1e490ce9203c-operator-scripts\") pod \"glance-4503-account-create-mqjsr\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.890419 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5212761-3a01-4a92-92bd-bb4f82a0d011-operator-scripts\") pod \"glance-db-create-w5nq9\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.890505 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj4rn\" (UniqueName: \"kubernetes.io/projected/079c90b2-0054-4e36-8836-1e490ce9203c-kube-api-access-qj4rn\") pod \"glance-4503-account-create-mqjsr\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.890532 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079c90b2-0054-4e36-8836-1e490ce9203c-operator-scripts\") pod \"glance-4503-account-create-mqjsr\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.890608 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ltc2\" (UniqueName: \"kubernetes.io/projected/c5212761-3a01-4a92-92bd-bb4f82a0d011-kube-api-access-2ltc2\") pod \"glance-db-create-w5nq9\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.891279 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c5212761-3a01-4a92-92bd-bb4f82a0d011-operator-scripts\") pod \"glance-db-create-w5nq9\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.893143 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079c90b2-0054-4e36-8836-1e490ce9203c-operator-scripts\") pod \"glance-4503-account-create-mqjsr\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.911404 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj4rn\" (UniqueName: \"kubernetes.io/projected/079c90b2-0054-4e36-8836-1e490ce9203c-kube-api-access-qj4rn\") pod \"glance-4503-account-create-mqjsr\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.913161 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ltc2\" (UniqueName: \"kubernetes.io/projected/c5212761-3a01-4a92-92bd-bb4f82a0d011-kube-api-access-2ltc2\") pod \"glance-db-create-w5nq9\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:30 crc kubenswrapper[4800]: I1125 15:37:30.975487 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a0ce-account-create-b766g"] Nov 25 15:37:30 crc kubenswrapper[4800]: W1125 15:37:30.988235 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3699aa7f_d6e9_45ea_8988_51ab0811f43c.slice/crio-f2af575148d074cb9075b7b28b19aab073cff404f714d6cac646c52371aab961 WatchSource:0}: Error finding container f2af575148d074cb9075b7b28b19aab073cff404f714d6cac646c52371aab961: Status 404 returned error can't find the container with id f2af575148d074cb9075b7b28b19aab073cff404f714d6cac646c52371aab961 Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.002379 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.048607 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-99n98"] Nov 25 15:37:31 crc kubenswrapper[4800]: W1125 15:37:31.054774 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod780bb63d_407c_41c6_8dc0_6e03a4b904fd.slice/crio-70d36a78fa65b5555689ba752984b4cb3e429ea5bc35e084a691a619dd20691b WatchSource:0}: Error finding container 70d36a78fa65b5555689ba752984b4cb3e429ea5bc35e084a691a619dd20691b: Status 404 returned error can't find the container with id 70d36a78fa65b5555689ba752984b4cb3e429ea5bc35e084a691a619dd20691b Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.122769 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-96nfb"] Nov 25 15:37:31 crc kubenswrapper[4800]: W1125 15:37:31.134712 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0bbad3a_0877_42a9_9b3b_8102f399768d.slice/crio-147d86d315de4ac411f2360a3beade3d7b4daed26719e77b0aba55711bcb5054 WatchSource:0}: Error finding container 147d86d315de4ac411f2360a3beade3d7b4daed26719e77b0aba55711bcb5054: Status 404 returned error can't find the container with id 147d86d315de4ac411f2360a3beade3d7b4daed26719e77b0aba55711bcb5054 Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.183095 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d636-account-create-6fnz8"] Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.208084 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.217956 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d636-account-create-6fnz8" event={"ID":"b4cb5047-76f0-42c5-91a3-24cf5274f77b","Type":"ContainerStarted","Data":"e3197f7dcc3356fda0886f93477c532beeb0358306391d9ae28c171ed6b9c238"} Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.219598 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-96nfb" event={"ID":"a0bbad3a-0877-42a9-9b3b-8102f399768d","Type":"ContainerStarted","Data":"147d86d315de4ac411f2360a3beade3d7b4daed26719e77b0aba55711bcb5054"} Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.221286 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-99n98" event={"ID":"780bb63d-407c-41c6-8dc0-6e03a4b904fd","Type":"ContainerStarted","Data":"70d36a78fa65b5555689ba752984b4cb3e429ea5bc35e084a691a619dd20691b"} Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.222773 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a0ce-account-create-b766g" event={"ID":"3699aa7f-d6e9-45ea-8988-51ab0811f43c","Type":"ContainerStarted","Data":"f2af575148d074cb9075b7b28b19aab073cff404f714d6cac646c52371aab961"} Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.488118 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4503-account-create-mqjsr"] Nov 25 15:37:31 crc kubenswrapper[4800]: W1125 15:37:31.505043 4800 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod079c90b2_0054_4e36_8836_1e490ce9203c.slice/crio-a1c9ddd9302283ce556e1183eac04a9b527015d2ea42a2e133662115b5e3158b WatchSource:0}: Error finding container a1c9ddd9302283ce556e1183eac04a9b527015d2ea42a2e133662115b5e3158b: Status 404 returned error can't find the container with id a1c9ddd9302283ce556e1183eac04a9b527015d2ea42a2e133662115b5e3158b Nov 25 15:37:31 crc kubenswrapper[4800]: I1125 15:37:31.722155 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-w5nq9"] Nov 25 15:37:31 crc kubenswrapper[4800]: W1125 15:37:31.739770 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5212761_3a01_4a92_92bd_bb4f82a0d011.slice/crio-5fa917846b72fbe285b5f7eac1cfa584f4f739485187364e5ee7448736b8b2e9 WatchSource:0}: Error finding container 5fa917846b72fbe285b5f7eac1cfa584f4f739485187364e5ee7448736b8b2e9: Status 404 returned error can't find the container with id 5fa917846b72fbe285b5f7eac1cfa584f4f739485187364e5ee7448736b8b2e9 Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.234664 4800 generic.go:334] "Generic (PLEG): container finished" podID="a0bbad3a-0877-42a9-9b3b-8102f399768d" containerID="d4f6867f6fd82b227b474d6639a64646d69b0dbc982429ad7c5d7b85e6479182" exitCode=0 Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.234800 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-96nfb" event={"ID":"a0bbad3a-0877-42a9-9b3b-8102f399768d","Type":"ContainerDied","Data":"d4f6867f6fd82b227b474d6639a64646d69b0dbc982429ad7c5d7b85e6479182"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.237782 4800 generic.go:334] "Generic (PLEG): container finished" podID="780bb63d-407c-41c6-8dc0-6e03a4b904fd" containerID="3e420f4524fa3a852175ce114a113cb22785f6ac820fef1f7518b2d1c95ba4d6" exitCode=0 Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.237885 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-99n98" event={"ID":"780bb63d-407c-41c6-8dc0-6e03a4b904fd","Type":"ContainerDied","Data":"3e420f4524fa3a852175ce114a113cb22785f6ac820fef1f7518b2d1c95ba4d6"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.239888 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4503-account-create-mqjsr" event={"ID":"079c90b2-0054-4e36-8836-1e490ce9203c","Type":"ContainerStarted","Data":"e2784e7b40020c7a4b81decfe3bbf46edfbad8fd457ae2c0f009e7d54ce6e5e1"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.239938 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4503-account-create-mqjsr" event={"ID":"079c90b2-0054-4e36-8836-1e490ce9203c","Type":"ContainerStarted","Data":"a1c9ddd9302283ce556e1183eac04a9b527015d2ea42a2e133662115b5e3158b"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.242650 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w5nq9" event={"ID":"c5212761-3a01-4a92-92bd-bb4f82a0d011","Type":"ContainerStarted","Data":"341352fd362a312240ea7d66abc5d59fabb822289e87582d2ac39b2b43684416"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.242763 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w5nq9" event={"ID":"c5212761-3a01-4a92-92bd-bb4f82a0d011","Type":"ContainerStarted","Data":"5fa917846b72fbe285b5f7eac1cfa584f4f739485187364e5ee7448736b8b2e9"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 
15:37:32.244767 4800 generic.go:334] "Generic (PLEG): container finished" podID="3699aa7f-d6e9-45ea-8988-51ab0811f43c" containerID="28aeb7e8e7eb56c3ea859debe777a996479d8f06bd882c093544ff345e179e48" exitCode=0 Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.244871 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a0ce-account-create-b766g" event={"ID":"3699aa7f-d6e9-45ea-8988-51ab0811f43c","Type":"ContainerDied","Data":"28aeb7e8e7eb56c3ea859debe777a996479d8f06bd882c093544ff345e179e48"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.248092 4800 generic.go:334] "Generic (PLEG): container finished" podID="b4cb5047-76f0-42c5-91a3-24cf5274f77b" containerID="d7f44252bcb12cfbfc1bfa560bdecbece3809b214ff9c5678657b9d0ee0446fa" exitCode=0 Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.248171 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d636-account-create-6fnz8" event={"ID":"b4cb5047-76f0-42c5-91a3-24cf5274f77b","Type":"ContainerDied","Data":"d7f44252bcb12cfbfc1bfa560bdecbece3809b214ff9c5678657b9d0ee0446fa"} Nov 25 15:37:32 crc kubenswrapper[4800]: I1125 15:37:32.269778 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-w5nq9" podStartSLOduration=2.269752737 podStartE2EDuration="2.269752737s" podCreationTimestamp="2025-11-25 15:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:37:32.268060141 +0000 UTC m=+1213.322468623" watchObservedRunningTime="2025-11-25 15:37:32.269752737 +0000 UTC m=+1213.324161219" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.258283 4800 generic.go:334] "Generic (PLEG): container finished" podID="079c90b2-0054-4e36-8836-1e490ce9203c" containerID="e2784e7b40020c7a4b81decfe3bbf46edfbad8fd457ae2c0f009e7d54ce6e5e1" exitCode=0 Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.258365 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4503-account-create-mqjsr" event={"ID":"079c90b2-0054-4e36-8836-1e490ce9203c","Type":"ContainerDied","Data":"e2784e7b40020c7a4b81decfe3bbf46edfbad8fd457ae2c0f009e7d54ce6e5e1"} Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.260828 4800 generic.go:334] "Generic (PLEG): container finished" podID="c5212761-3a01-4a92-92bd-bb4f82a0d011" containerID="341352fd362a312240ea7d66abc5d59fabb822289e87582d2ac39b2b43684416" exitCode=0 Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.260892 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w5nq9" event={"ID":"c5212761-3a01-4a92-92bd-bb4f82a0d011","Type":"ContainerDied","Data":"341352fd362a312240ea7d66abc5d59fabb822289e87582d2ac39b2b43684416"} Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.666379 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.799178 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.807652 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-99n98" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.829664 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-96nfb" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.850499 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3699aa7f-d6e9-45ea-8988-51ab0811f43c-operator-scripts\") pod \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.850576 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdqmt\" (UniqueName: \"kubernetes.io/projected/3699aa7f-d6e9-45ea-8988-51ab0811f43c-kube-api-access-mdqmt\") pod \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\" (UID: \"3699aa7f-d6e9-45ea-8988-51ab0811f43c\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.851274 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzdr8\" (UniqueName: \"kubernetes.io/projected/a0bbad3a-0877-42a9-9b3b-8102f399768d-kube-api-access-bzdr8\") pod \"a0bbad3a-0877-42a9-9b3b-8102f399768d\" (UID: \"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.851342 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4cb5047-76f0-42c5-91a3-24cf5274f77b-operator-scripts\") pod \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.851387 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpqff\" (UniqueName: \"kubernetes.io/projected/b4cb5047-76f0-42c5-91a3-24cf5274f77b-kube-api-access-jpqff\") pod \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\" (UID: \"b4cb5047-76f0-42c5-91a3-24cf5274f77b\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.851417 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0bbad3a-0877-42a9-9b3b-8102f399768d-operator-scripts\") pod \"a0bbad3a-0877-42a9-9b3b-8102f399768d\" (UID: \"a0bbad3a-0877-42a9-9b3b-8102f399768d\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.856418 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cb5047-76f0-42c5-91a3-24cf5274f77b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b4cb5047-76f0-42c5-91a3-24cf5274f77b" (UID: "b4cb5047-76f0-42c5-91a3-24cf5274f77b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.856943 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3699aa7f-d6e9-45ea-8988-51ab0811f43c-kube-api-access-mdqmt" (OuterVolumeSpecName: "kube-api-access-mdqmt") pod "3699aa7f-d6e9-45ea-8988-51ab0811f43c" (UID: "3699aa7f-d6e9-45ea-8988-51ab0811f43c"). InnerVolumeSpecName "kube-api-access-mdqmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.857565 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3699aa7f-d6e9-45ea-8988-51ab0811f43c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3699aa7f-d6e9-45ea-8988-51ab0811f43c" (UID: "3699aa7f-d6e9-45ea-8988-51ab0811f43c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.859227 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0bbad3a-0877-42a9-9b3b-8102f399768d-kube-api-access-bzdr8" (OuterVolumeSpecName: "kube-api-access-bzdr8") pod "a0bbad3a-0877-42a9-9b3b-8102f399768d" (UID: "a0bbad3a-0877-42a9-9b3b-8102f399768d"). InnerVolumeSpecName "kube-api-access-bzdr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.859746 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0bbad3a-0877-42a9-9b3b-8102f399768d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a0bbad3a-0877-42a9-9b3b-8102f399768d" (UID: "a0bbad3a-0877-42a9-9b3b-8102f399768d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.860455 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4cb5047-76f0-42c5-91a3-24cf5274f77b-kube-api-access-jpqff" (OuterVolumeSpecName: "kube-api-access-jpqff") pod "b4cb5047-76f0-42c5-91a3-24cf5274f77b" (UID: "b4cb5047-76f0-42c5-91a3-24cf5274f77b"). InnerVolumeSpecName "kube-api-access-jpqff". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952155 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq862\" (UniqueName: \"kubernetes.io/projected/780bb63d-407c-41c6-8dc0-6e03a4b904fd-kube-api-access-vq862\") pod \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952211 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/780bb63d-407c-41c6-8dc0-6e03a4b904fd-operator-scripts\") pod \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\" (UID: \"780bb63d-407c-41c6-8dc0-6e03a4b904fd\") " Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952583 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzdr8\" (UniqueName: \"kubernetes.io/projected/a0bbad3a-0877-42a9-9b3b-8102f399768d-kube-api-access-bzdr8\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952599 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4cb5047-76f0-42c5-91a3-24cf5274f77b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952608 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpqff\" (UniqueName: \"kubernetes.io/projected/b4cb5047-76f0-42c5-91a3-24cf5274f77b-kube-api-access-jpqff\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952617 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0bbad3a-0877-42a9-9b3b-8102f399768d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952625 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3699aa7f-d6e9-45ea-8988-51ab0811f43c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:33 
crc kubenswrapper[4800]: I1125 15:37:33.952634 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdqmt\" (UniqueName: \"kubernetes.io/projected/3699aa7f-d6e9-45ea-8988-51ab0811f43c-kube-api-access-mdqmt\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.952979 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/780bb63d-407c-41c6-8dc0-6e03a4b904fd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "780bb63d-407c-41c6-8dc0-6e03a4b904fd" (UID: "780bb63d-407c-41c6-8dc0-6e03a4b904fd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:33 crc kubenswrapper[4800]: I1125 15:37:33.955987 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/780bb63d-407c-41c6-8dc0-6e03a4b904fd-kube-api-access-vq862" (OuterVolumeSpecName: "kube-api-access-vq862") pod "780bb63d-407c-41c6-8dc0-6e03a4b904fd" (UID: "780bb63d-407c-41c6-8dc0-6e03a4b904fd"). InnerVolumeSpecName "kube-api-access-vq862". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.054430 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq862\" (UniqueName: \"kubernetes.io/projected/780bb63d-407c-41c6-8dc0-6e03a4b904fd-kube-api-access-vq862\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.054486 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/780bb63d-407c-41c6-8dc0-6e03a4b904fd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.271151 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a0ce-account-create-b766g" event={"ID":"3699aa7f-d6e9-45ea-8988-51ab0811f43c","Type":"ContainerDied","Data":"f2af575148d074cb9075b7b28b19aab073cff404f714d6cac646c52371aab961"} Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.271238 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2af575148d074cb9075b7b28b19aab073cff404f714d6cac646c52371aab961" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.271179 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a0ce-account-create-b766g" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.274462 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-96nfb" event={"ID":"a0bbad3a-0877-42a9-9b3b-8102f399768d","Type":"ContainerDied","Data":"147d86d315de4ac411f2360a3beade3d7b4daed26719e77b0aba55711bcb5054"} Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.274495 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="147d86d315de4ac411f2360a3beade3d7b4daed26719e77b0aba55711bcb5054" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.274576 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-96nfb" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.276584 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d636-account-create-6fnz8" event={"ID":"b4cb5047-76f0-42c5-91a3-24cf5274f77b","Type":"ContainerDied","Data":"e3197f7dcc3356fda0886f93477c532beeb0358306391d9ae28c171ed6b9c238"} Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.276604 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d636-account-create-6fnz8" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.276613 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3197f7dcc3356fda0886f93477c532beeb0358306391d9ae28c171ed6b9c238" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.278802 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-99n98" event={"ID":"780bb63d-407c-41c6-8dc0-6e03a4b904fd","Type":"ContainerDied","Data":"70d36a78fa65b5555689ba752984b4cb3e429ea5bc35e084a691a619dd20691b"} Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.278872 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70d36a78fa65b5555689ba752984b4cb3e429ea5bc35e084a691a619dd20691b" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.278987 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-99n98" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.870505 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.891706 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.973637 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ltc2\" (UniqueName: \"kubernetes.io/projected/c5212761-3a01-4a92-92bd-bb4f82a0d011-kube-api-access-2ltc2\") pod \"c5212761-3a01-4a92-92bd-bb4f82a0d011\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.973826 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5212761-3a01-4a92-92bd-bb4f82a0d011-operator-scripts\") pod \"c5212761-3a01-4a92-92bd-bb4f82a0d011\" (UID: \"c5212761-3a01-4a92-92bd-bb4f82a0d011\") " Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.975905 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5212761-3a01-4a92-92bd-bb4f82a0d011-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c5212761-3a01-4a92-92bd-bb4f82a0d011" (UID: "c5212761-3a01-4a92-92bd-bb4f82a0d011"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:34 crc kubenswrapper[4800]: I1125 15:37:34.986190 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5212761-3a01-4a92-92bd-bb4f82a0d011-kube-api-access-2ltc2" (OuterVolumeSpecName: "kube-api-access-2ltc2") pod "c5212761-3a01-4a92-92bd-bb4f82a0d011" (UID: "c5212761-3a01-4a92-92bd-bb4f82a0d011"). InnerVolumeSpecName "kube-api-access-2ltc2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.075904 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079c90b2-0054-4e36-8836-1e490ce9203c-operator-scripts\") pod \"079c90b2-0054-4e36-8836-1e490ce9203c\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.075985 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj4rn\" (UniqueName: \"kubernetes.io/projected/079c90b2-0054-4e36-8836-1e490ce9203c-kube-api-access-qj4rn\") pod \"079c90b2-0054-4e36-8836-1e490ce9203c\" (UID: \"079c90b2-0054-4e36-8836-1e490ce9203c\") " Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.077784 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/079c90b2-0054-4e36-8836-1e490ce9203c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "079c90b2-0054-4e36-8836-1e490ce9203c" (UID: "079c90b2-0054-4e36-8836-1e490ce9203c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.078766 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079c90b2-0054-4e36-8836-1e490ce9203c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.078793 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5212761-3a01-4a92-92bd-bb4f82a0d011-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.078805 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ltc2\" (UniqueName: \"kubernetes.io/projected/c5212761-3a01-4a92-92bd-bb4f82a0d011-kube-api-access-2ltc2\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.082115 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/079c90b2-0054-4e36-8836-1e490ce9203c-kube-api-access-qj4rn" (OuterVolumeSpecName: "kube-api-access-qj4rn") pod "079c90b2-0054-4e36-8836-1e490ce9203c" (UID: "079c90b2-0054-4e36-8836-1e490ce9203c"). InnerVolumeSpecName "kube-api-access-qj4rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.180691 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj4rn\" (UniqueName: \"kubernetes.io/projected/079c90b2-0054-4e36-8836-1e490ce9203c-kube-api-access-qj4rn\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.301356 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4503-account-create-mqjsr" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.301345 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4503-account-create-mqjsr" event={"ID":"079c90b2-0054-4e36-8836-1e490ce9203c","Type":"ContainerDied","Data":"a1c9ddd9302283ce556e1183eac04a9b527015d2ea42a2e133662115b5e3158b"} Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.301493 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1c9ddd9302283ce556e1183eac04a9b527015d2ea42a2e133662115b5e3158b" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.304024 4800 generic.go:334] "Generic (PLEG): container finished" podID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerID="6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca" exitCode=0 Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.304085 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"72be41d8-6678-467c-a4d5-c4340e488c1b","Type":"ContainerDied","Data":"6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca"} Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.312682 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w5nq9" event={"ID":"c5212761-3a01-4a92-92bd-bb4f82a0d011","Type":"ContainerDied","Data":"5fa917846b72fbe285b5f7eac1cfa584f4f739485187364e5ee7448736b8b2e9"} Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.312730 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fa917846b72fbe285b5f7eac1cfa584f4f739485187364e5ee7448736b8b2e9" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.312809 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-w5nq9" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.315017 4800 generic.go:334] "Generic (PLEG): container finished" podID="46b2c800-efef-4668-9a57-c66ff504e0db" containerID="71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552" exitCode=0 Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.315047 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"46b2c800-efef-4668-9a57-c66ff504e0db","Type":"ContainerDied","Data":"71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552"} Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.629808 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-jmbtv" podUID="f0140b9d-bed7-44ae-a1d5-8e0acdb70742" containerName="ovn-controller" probeResult="failure" output=< Nov 25 15:37:35 crc kubenswrapper[4800]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 15:37:35 crc kubenswrapper[4800]: > Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.661457 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.663365 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-tklmv" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.904252 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jmbtv-config-v45jr"] Nov 25 15:37:35 crc kubenswrapper[4800]: E1125 15:37:35.905257 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4cb5047-76f0-42c5-91a3-24cf5274f77b" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905283 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4cb5047-76f0-42c5-91a3-24cf5274f77b" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: E1125 15:37:35.905303 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="079c90b2-0054-4e36-8836-1e490ce9203c" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905312 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="079c90b2-0054-4e36-8836-1e490ce9203c" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: E1125 15:37:35.905327 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3699aa7f-d6e9-45ea-8988-51ab0811f43c" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905335 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3699aa7f-d6e9-45ea-8988-51ab0811f43c" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: E1125 15:37:35.905361 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="780bb63d-407c-41c6-8dc0-6e03a4b904fd" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905370 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="780bb63d-407c-41c6-8dc0-6e03a4b904fd" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: E1125 15:37:35.905398 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0bbad3a-0877-42a9-9b3b-8102f399768d" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905406 4800 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="a0bbad3a-0877-42a9-9b3b-8102f399768d" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: E1125 15:37:35.905424 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5212761-3a01-4a92-92bd-bb4f82a0d011" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905432 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5212761-3a01-4a92-92bd-bb4f82a0d011" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905657 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5212761-3a01-4a92-92bd-bb4f82a0d011" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905677 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4cb5047-76f0-42c5-91a3-24cf5274f77b" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905691 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0bbad3a-0877-42a9-9b3b-8102f399768d" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905705 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3699aa7f-d6e9-45ea-8988-51ab0811f43c" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905718 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="079c90b2-0054-4e36-8836-1e490ce9203c" containerName="mariadb-account-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.905733 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="780bb63d-407c-41c6-8dc0-6e03a4b904fd" containerName="mariadb-database-create" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.906631 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.911826 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.925180 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jmbtv-config-v45jr"] Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.997277 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tds6\" (UniqueName: \"kubernetes.io/projected/ba1dca90-fab6-485f-9fc6-bb577628ae33-kube-api-access-7tds6\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.997340 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.997421 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-scripts\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.997451 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run-ovn\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.997515 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-log-ovn\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:35 crc kubenswrapper[4800]: I1125 15:37:35.997535 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-additional-scripts\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.099930 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-scripts\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100264 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run-ovn\") pod 
\"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100431 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-log-ovn\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100508 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-additional-scripts\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100632 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tds6\" (UniqueName: \"kubernetes.io/projected/ba1dca90-fab6-485f-9fc6-bb577628ae33-kube-api-access-7tds6\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100718 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100721 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-log-ovn\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100666 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run-ovn\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.100807 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.101661 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-additional-scripts\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.104534 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-scripts\") pod 
\"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.122541 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tds6\" (UniqueName: \"kubernetes.io/projected/ba1dca90-fab6-485f-9fc6-bb577628ae33-kube-api-access-7tds6\") pod \"ovn-controller-jmbtv-config-v45jr\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.231964 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.331151 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"72be41d8-6678-467c-a4d5-c4340e488c1b","Type":"ContainerStarted","Data":"7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2"} Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.331526 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.338808 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"46b2c800-efef-4668-9a57-c66ff504e0db","Type":"ContainerStarted","Data":"1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050"} Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.339961 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.378884 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.895493579000004 podStartE2EDuration="1m11.378859274s" podCreationTimestamp="2025-11-25 15:36:25 +0000 UTC" firstStartedPulling="2025-11-25 15:36:28.009933133 +0000 UTC m=+1149.064341615" lastFinishedPulling="2025-11-25 15:37:00.493298828 +0000 UTC m=+1181.547707310" observedRunningTime="2025-11-25 15:37:36.372837778 +0000 UTC m=+1217.427329963" watchObservedRunningTime="2025-11-25 15:37:36.378859274 +0000 UTC m=+1217.433267756" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.415445 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371966.43997 podStartE2EDuration="1m10.414805014s" podCreationTimestamp="2025-11-25 15:36:26 +0000 UTC" firstStartedPulling="2025-11-25 15:36:28.138991122 +0000 UTC m=+1149.193399604" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:37:36.40233791 +0000 UTC m=+1217.456746402" watchObservedRunningTime="2025-11-25 15:37:36.414805014 +0000 UTC m=+1217.469213496" Nov 25 15:37:36 crc kubenswrapper[4800]: I1125 15:37:36.690638 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jmbtv-config-v45jr"] Nov 25 15:37:37 crc kubenswrapper[4800]: I1125 15:37:37.349528 4800 generic.go:334] "Generic (PLEG): container finished" podID="ba1dca90-fab6-485f-9fc6-bb577628ae33" containerID="fc863da79b2daeddca9a76f0a902b0204dda157c4501b808f1b4fa8a7ca04c0c" exitCode=0 Nov 25 15:37:37 crc kubenswrapper[4800]: I1125 15:37:37.349662 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jmbtv-config-v45jr" 
event={"ID":"ba1dca90-fab6-485f-9fc6-bb577628ae33","Type":"ContainerDied","Data":"fc863da79b2daeddca9a76f0a902b0204dda157c4501b808f1b4fa8a7ca04c0c"} Nov 25 15:37:37 crc kubenswrapper[4800]: I1125 15:37:37.350015 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jmbtv-config-v45jr" event={"ID":"ba1dca90-fab6-485f-9fc6-bb577628ae33","Type":"ContainerStarted","Data":"0cf031d9890c26ea715e672ee433a66cca15be427e82b2333bdfa6849b92b03c"} Nov 25 15:37:37 crc kubenswrapper[4800]: E1125 15:37:37.581340 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2a07cf0_e668_43c3_bc9a_8594243f1d02.slice/crio-2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58\": RecentStats: unable to find data in memory cache]" Nov 25 15:37:37 crc kubenswrapper[4800]: I1125 15:37:37.954914 4800 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podcf387b8c-24e2-43e5-a1c7-65b876b98b8d"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podcf387b8c-24e2-43e5-a1c7-65b876b98b8d] : Timed out while waiting for systemd to remove kubepods-besteffort-podcf387b8c_24e2_43e5_a1c7_65b876b98b8d.slice" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.711589 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.859972 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-additional-scripts\") pod \"ba1dca90-fab6-485f-9fc6-bb577628ae33\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.860498 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-log-ovn\") pod \"ba1dca90-fab6-485f-9fc6-bb577628ae33\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.860559 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tds6\" (UniqueName: \"kubernetes.io/projected/ba1dca90-fab6-485f-9fc6-bb577628ae33-kube-api-access-7tds6\") pod \"ba1dca90-fab6-485f-9fc6-bb577628ae33\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.860572 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "ba1dca90-fab6-485f-9fc6-bb577628ae33" (UID: "ba1dca90-fab6-485f-9fc6-bb577628ae33"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.860619 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run\") pod \"ba1dca90-fab6-485f-9fc6-bb577628ae33\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.860767 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run" (OuterVolumeSpecName: "var-run") pod "ba1dca90-fab6-485f-9fc6-bb577628ae33" (UID: "ba1dca90-fab6-485f-9fc6-bb577628ae33"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.860874 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "ba1dca90-fab6-485f-9fc6-bb577628ae33" (UID: "ba1dca90-fab6-485f-9fc6-bb577628ae33"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.861461 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-scripts\") pod \"ba1dca90-fab6-485f-9fc6-bb577628ae33\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.861580 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run-ovn\") pod \"ba1dca90-fab6-485f-9fc6-bb577628ae33\" (UID: \"ba1dca90-fab6-485f-9fc6-bb577628ae33\") " Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.861623 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "ba1dca90-fab6-485f-9fc6-bb577628ae33" (UID: "ba1dca90-fab6-485f-9fc6-bb577628ae33"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.862120 4800 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.862142 4800 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.862155 4800 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.862167 4800 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ba1dca90-fab6-485f-9fc6-bb577628ae33-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.862282 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-scripts" (OuterVolumeSpecName: "scripts") pod "ba1dca90-fab6-485f-9fc6-bb577628ae33" (UID: "ba1dca90-fab6-485f-9fc6-bb577628ae33"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.869375 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba1dca90-fab6-485f-9fc6-bb577628ae33-kube-api-access-7tds6" (OuterVolumeSpecName: "kube-api-access-7tds6") pod "ba1dca90-fab6-485f-9fc6-bb577628ae33" (UID: "ba1dca90-fab6-485f-9fc6-bb577628ae33"). InnerVolumeSpecName "kube-api-access-7tds6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.963589 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tds6\" (UniqueName: \"kubernetes.io/projected/ba1dca90-fab6-485f-9fc6-bb577628ae33-kube-api-access-7tds6\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:38 crc kubenswrapper[4800]: I1125 15:37:38.963651 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ba1dca90-fab6-485f-9fc6-bb577628ae33-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:37:39 crc kubenswrapper[4800]: I1125 15:37:39.368609 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jmbtv-config-v45jr" event={"ID":"ba1dca90-fab6-485f-9fc6-bb577628ae33","Type":"ContainerDied","Data":"0cf031d9890c26ea715e672ee433a66cca15be427e82b2333bdfa6849b92b03c"} Nov 25 15:37:39 crc kubenswrapper[4800]: I1125 15:37:39.368623 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jmbtv-config-v45jr" Nov 25 15:37:39 crc kubenswrapper[4800]: I1125 15:37:39.368688 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cf031d9890c26ea715e672ee433a66cca15be427e82b2333bdfa6849b92b03c" Nov 25 15:37:39 crc kubenswrapper[4800]: I1125 15:37:39.835028 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-jmbtv-config-v45jr"] Nov 25 15:37:39 crc kubenswrapper[4800]: I1125 15:37:39.841582 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-jmbtv-config-v45jr"] Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.644655 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-jmbtv" Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.966018 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-fxh9k"] Nov 25 15:37:40 crc kubenswrapper[4800]: E1125 15:37:40.966514 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba1dca90-fab6-485f-9fc6-bb577628ae33" containerName="ovn-config" Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.966558 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba1dca90-fab6-485f-9fc6-bb577628ae33" containerName="ovn-config" Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.966776 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba1dca90-fab6-485f-9fc6-bb577628ae33" containerName="ovn-config" Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.967899 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.973801 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.974189 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-58txk" Nov 25 15:37:40 crc kubenswrapper[4800]: I1125 15:37:40.978996 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-fxh9k"] Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.111832 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-combined-ca-bundle\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.111923 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-config-data\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.111952 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-db-sync-config-data\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.112210 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-kzpvf\" (UniqueName: \"kubernetes.io/projected/861a549f-5373-4d45-befd-3859dbfdc705-kube-api-access-kzpvf\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.214282 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzpvf\" (UniqueName: \"kubernetes.io/projected/861a549f-5373-4d45-befd-3859dbfdc705-kube-api-access-kzpvf\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.214478 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-combined-ca-bundle\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.214577 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-config-data\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.214609 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-db-sync-config-data\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.221986 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-combined-ca-bundle\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.225112 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-config-data\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.225320 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-db-sync-config-data\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.240133 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzpvf\" (UniqueName: \"kubernetes.io/projected/861a549f-5373-4d45-befd-3859dbfdc705-kube-api-access-kzpvf\") pod \"glance-db-sync-fxh9k\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.291751 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-fxh9k" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.800522 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba1dca90-fab6-485f-9fc6-bb577628ae33" path="/var/lib/kubelet/pods/ba1dca90-fab6-485f-9fc6-bb577628ae33/volumes" Nov 25 15:37:41 crc kubenswrapper[4800]: I1125 15:37:41.888297 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-fxh9k"] Nov 25 15:37:41 crc kubenswrapper[4800]: W1125 15:37:41.897229 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod861a549f_5373_4d45_befd_3859dbfdc705.slice/crio-596b6f80c2daea62bca50f8964ff79865338839f010227c4b3f1727925d14cb7 WatchSource:0}: Error finding container 596b6f80c2daea62bca50f8964ff79865338839f010227c4b3f1727925d14cb7: Status 404 returned error can't find the container with id 596b6f80c2daea62bca50f8964ff79865338839f010227c4b3f1727925d14cb7 Nov 25 15:37:42 crc kubenswrapper[4800]: I1125 15:37:42.407236 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fxh9k" event={"ID":"861a549f-5373-4d45-befd-3859dbfdc705","Type":"ContainerStarted","Data":"596b6f80c2daea62bca50f8964ff79865338839f010227c4b3f1727925d14cb7"} Nov 25 15:37:42 crc kubenswrapper[4800]: I1125 15:37:42.640358 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:37:42 crc kubenswrapper[4800]: I1125 15:37:42.640453 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:37:47 crc kubenswrapper[4800]: I1125 15:37:47.418383 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.97:5671: connect: connection refused" Nov 25 15:37:47 crc kubenswrapper[4800]: I1125 15:37:47.539482 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 25 15:37:47 crc kubenswrapper[4800]: E1125 15:37:47.819060 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2a07cf0_e668_43c3_bc9a_8594243f1d02.slice/crio-2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58\": RecentStats: unable to find data in memory cache]" Nov 25 15:37:57 crc kubenswrapper[4800]: E1125 15:37:57.388397 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29" Nov 25 15:37:57 crc kubenswrapper[4800]: E1125 15:37:57.389080 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kzpvf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-fxh9k_openstack(861a549f-5373-4d45-befd-3859dbfdc705): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:37:57 crc kubenswrapper[4800]: E1125 15:37:57.390377 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-fxh9k" podUID="861a549f-5373-4d45-befd-3859dbfdc705" Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.417068 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.543137 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:37:57 crc kubenswrapper[4800]: E1125 15:37:57.576453 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29\\\"\"" pod="openstack/glance-db-sync-fxh9k" podUID="861a549f-5373-4d45-befd-3859dbfdc705" Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.892008 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-h5lb7"] Nov 25 
15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.894413 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.943641 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-h5lb7"] Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.971414 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-6e3d-account-create-pkf44"] Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.975004 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.977166 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7kvq\" (UniqueName: \"kubernetes.io/projected/fb6483f1-4969-48cf-814f-ecdd47c261ec-kube-api-access-w7kvq\") pod \"cinder-db-create-h5lb7\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.977265 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6483f1-4969-48cf-814f-ecdd47c261ec-operator-scripts\") pod \"cinder-db-create-h5lb7\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:57 crc kubenswrapper[4800]: I1125 15:37:57.982348 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.037763 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6e3d-account-create-pkf44"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.064513 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-thkrz"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.066336 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.079105 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm8h5\" (UniqueName: \"kubernetes.io/projected/365fdcd1-ee57-49e4-819f-e8c567c99001-kube-api-access-tm8h5\") pod \"barbican-6e3d-account-create-pkf44\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.079438 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7kvq\" (UniqueName: \"kubernetes.io/projected/fb6483f1-4969-48cf-814f-ecdd47c261ec-kube-api-access-w7kvq\") pod \"cinder-db-create-h5lb7\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.079561 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6483f1-4969-48cf-814f-ecdd47c261ec-operator-scripts\") pod \"cinder-db-create-h5lb7\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.079662 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365fdcd1-ee57-49e4-819f-e8c567c99001-operator-scripts\") pod \"barbican-6e3d-account-create-pkf44\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.080462 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6483f1-4969-48cf-814f-ecdd47c261ec-operator-scripts\") pod \"cinder-db-create-h5lb7\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.099817 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-0885-account-create-jgg7v"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.101256 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.104963 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.108346 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-thkrz"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.109509 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7kvq\" (UniqueName: \"kubernetes.io/projected/fb6483f1-4969-48cf-814f-ecdd47c261ec-kube-api-access-w7kvq\") pod \"cinder-db-create-h5lb7\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.120055 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0885-account-create-jgg7v"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.132051 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-p9448"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.133368 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.138275 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.138280 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.138550 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.138413 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-p5fqm" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.143746 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-p9448"] Nov 25 15:37:58 crc kubenswrapper[4800]: E1125 15:37:58.181522 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2a07cf0_e668_43c3_bc9a_8594243f1d02.slice/crio-2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58\": RecentStats: unable to find data in memory cache]" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.181553 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm8h5\" (UniqueName: \"kubernetes.io/projected/365fdcd1-ee57-49e4-819f-e8c567c99001-kube-api-access-tm8h5\") pod \"barbican-6e3d-account-create-pkf44\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.181600 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da401e63-0bdb-4057-844e-c5938c5d9a98-operator-scripts\") pod \"barbican-db-create-thkrz\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.181649 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/78f8b613-903f-45b0-bf62-176546fd4f72-operator-scripts\") pod \"cinder-0885-account-create-jgg7v\" (UID: \"78f8b613-903f-45b0-bf62-176546fd4f72\") " pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.181678 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlzl9\" (UniqueName: \"kubernetes.io/projected/78f8b613-903f-45b0-bf62-176546fd4f72-kube-api-access-tlzl9\") pod \"cinder-0885-account-create-jgg7v\" (UID: \"78f8b613-903f-45b0-bf62-176546fd4f72\") " pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.181722 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365fdcd1-ee57-49e4-819f-e8c567c99001-operator-scripts\") pod \"barbican-6e3d-account-create-pkf44\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.181759 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjjlj\" (UniqueName: \"kubernetes.io/projected/da401e63-0bdb-4057-844e-c5938c5d9a98-kube-api-access-jjjlj\") pod \"barbican-db-create-thkrz\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.183494 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365fdcd1-ee57-49e4-819f-e8c567c99001-operator-scripts\") pod \"barbican-6e3d-account-create-pkf44\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.201201 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm8h5\" (UniqueName: \"kubernetes.io/projected/365fdcd1-ee57-49e4-819f-e8c567c99001-kube-api-access-tm8h5\") pod \"barbican-6e3d-account-create-pkf44\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.240053 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-h5lb7" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.285942 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjjlj\" (UniqueName: \"kubernetes.io/projected/da401e63-0bdb-4057-844e-c5938c5d9a98-kube-api-access-jjjlj\") pod \"barbican-db-create-thkrz\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.286349 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-config-data\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.286420 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbnv7\" (UniqueName: \"kubernetes.io/projected/fb9722e1-2a72-4b42-a605-4e5476890d27-kube-api-access-wbnv7\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.286447 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da401e63-0bdb-4057-844e-c5938c5d9a98-operator-scripts\") pod \"barbican-db-create-thkrz\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.286484 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f8b613-903f-45b0-bf62-176546fd4f72-operator-scripts\") pod \"cinder-0885-account-create-jgg7v\" (UID: \"78f8b613-903f-45b0-bf62-176546fd4f72\") " pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.286513 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-combined-ca-bundle\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.286533 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlzl9\" (UniqueName: \"kubernetes.io/projected/78f8b613-903f-45b0-bf62-176546fd4f72-kube-api-access-tlzl9\") pod \"cinder-0885-account-create-jgg7v\" (UID: \"78f8b613-903f-45b0-bf62-176546fd4f72\") " pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.288236 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da401e63-0bdb-4057-844e-c5938c5d9a98-operator-scripts\") pod \"barbican-db-create-thkrz\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.288723 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f8b613-903f-45b0-bf62-176546fd4f72-operator-scripts\") pod \"cinder-0885-account-create-jgg7v\" (UID: 
\"78f8b613-903f-45b0-bf62-176546fd4f72\") " pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.300947 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-2ffe-account-create-cxs5m"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.306458 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.310096 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.315461 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlzl9\" (UniqueName: \"kubernetes.io/projected/78f8b613-903f-45b0-bf62-176546fd4f72-kube-api-access-tlzl9\") pod \"cinder-0885-account-create-jgg7v\" (UID: \"78f8b613-903f-45b0-bf62-176546fd4f72\") " pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.315871 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjjlj\" (UniqueName: \"kubernetes.io/projected/da401e63-0bdb-4057-844e-c5938c5d9a98-kube-api-access-jjjlj\") pod \"barbican-db-create-thkrz\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.331522 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-2ffe-account-create-cxs5m"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.334418 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.387789 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/868262b9-c453-46a0-8885-1ccf13e06e98-operator-scripts\") pod \"neutron-2ffe-account-create-cxs5m\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.387901 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-combined-ca-bundle\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.387957 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7zqr\" (UniqueName: \"kubernetes.io/projected/868262b9-c453-46a0-8885-1ccf13e06e98-kube-api-access-m7zqr\") pod \"neutron-2ffe-account-create-cxs5m\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.388004 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-config-data\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.388039 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbnv7\" (UniqueName: 
\"kubernetes.io/projected/fb9722e1-2a72-4b42-a605-4e5476890d27-kube-api-access-wbnv7\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.392417 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-config-data\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.408911 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-thkrz" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.415029 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-combined-ca-bundle\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.417983 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbnv7\" (UniqueName: \"kubernetes.io/projected/fb9722e1-2a72-4b42-a605-4e5476890d27-kube-api-access-wbnv7\") pod \"keystone-db-sync-p9448\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.422912 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-bcj2h"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.424286 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.442561 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-bcj2h"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.462606 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.477765 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-p9448" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.493068 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7zqr\" (UniqueName: \"kubernetes.io/projected/868262b9-c453-46a0-8885-1ccf13e06e98-kube-api-access-m7zqr\") pod \"neutron-2ffe-account-create-cxs5m\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.493148 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68fc1e6f-3787-4d76-9ed2-701a8170a037-operator-scripts\") pod \"neutron-db-create-bcj2h\" (UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.493225 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/868262b9-c453-46a0-8885-1ccf13e06e98-operator-scripts\") pod \"neutron-2ffe-account-create-cxs5m\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.493261 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njxfs\" (UniqueName: \"kubernetes.io/projected/68fc1e6f-3787-4d76-9ed2-701a8170a037-kube-api-access-njxfs\") pod \"neutron-db-create-bcj2h\" (UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.494187 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/868262b9-c453-46a0-8885-1ccf13e06e98-operator-scripts\") pod \"neutron-2ffe-account-create-cxs5m\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.527497 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7zqr\" (UniqueName: \"kubernetes.io/projected/868262b9-c453-46a0-8885-1ccf13e06e98-kube-api-access-m7zqr\") pod \"neutron-2ffe-account-create-cxs5m\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.607412 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njxfs\" (UniqueName: \"kubernetes.io/projected/68fc1e6f-3787-4d76-9ed2-701a8170a037-kube-api-access-njxfs\") pod \"neutron-db-create-bcj2h\" (UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.607978 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68fc1e6f-3787-4d76-9ed2-701a8170a037-operator-scripts\") pod \"neutron-db-create-bcj2h\" (UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.609470 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68fc1e6f-3787-4d76-9ed2-701a8170a037-operator-scripts\") pod \"neutron-db-create-bcj2h\" 
(UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.634579 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njxfs\" (UniqueName: \"kubernetes.io/projected/68fc1e6f-3787-4d76-9ed2-701a8170a037-kube-api-access-njxfs\") pod \"neutron-db-create-bcj2h\" (UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:58 crc kubenswrapper[4800]: W1125 15:37:58.707287 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb6483f1_4969_48cf_814f_ecdd47c261ec.slice/crio-b4302bef2ecc01bbca8cec6f2ab8e5dd06e3b439e6511a08fc3a641d337d425b WatchSource:0}: Error finding container b4302bef2ecc01bbca8cec6f2ab8e5dd06e3b439e6511a08fc3a641d337d425b: Status 404 returned error can't find the container with id b4302bef2ecc01bbca8cec6f2ab8e5dd06e3b439e6511a08fc3a641d337d425b Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.708755 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-h5lb7"] Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.728437 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:37:58 crc kubenswrapper[4800]: I1125 15:37:58.769466 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-bcj2h" Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.025605 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6e3d-account-create-pkf44"] Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.151577 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0885-account-create-jgg7v"] Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.237931 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-thkrz"] Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.252598 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-p9448"] Nov 25 15:37:59 crc kubenswrapper[4800]: W1125 15:37:59.270103 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda401e63_0bdb_4057_844e_c5938c5d9a98.slice/crio-6852ddaf1ad4e87280a4d7ceb5dc1f7240ede7cfa23d915cd6ddc81c6f0654f7 WatchSource:0}: Error finding container 6852ddaf1ad4e87280a4d7ceb5dc1f7240ede7cfa23d915cd6ddc81c6f0654f7: Status 404 returned error can't find the container with id 6852ddaf1ad4e87280a4d7ceb5dc1f7240ede7cfa23d915cd6ddc81c6f0654f7 Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.273394 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-bcj2h"] Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.348635 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-2ffe-account-create-cxs5m"] Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.600884 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-bcj2h" event={"ID":"68fc1e6f-3787-4d76-9ed2-701a8170a037","Type":"ContainerStarted","Data":"13925ae164f36b9e848e1667cdcb3c7cc670f0fa113802fb1aa2a46f381d319a"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.602700 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0885-account-create-jgg7v" 
event={"ID":"78f8b613-903f-45b0-bf62-176546fd4f72","Type":"ContainerStarted","Data":"004339295a1ec9ece749574808318879e39c0651c7914c5d5369f50c440a576f"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.605093 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-h5lb7" event={"ID":"fb6483f1-4969-48cf-814f-ecdd47c261ec","Type":"ContainerStarted","Data":"b0192bdaf22666e1dd439e8e12b5ff92088a8bbd38dc90a0fefe34c7f11acc18"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.605157 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-h5lb7" event={"ID":"fb6483f1-4969-48cf-814f-ecdd47c261ec","Type":"ContainerStarted","Data":"b4302bef2ecc01bbca8cec6f2ab8e5dd06e3b439e6511a08fc3a641d337d425b"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.606662 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6e3d-account-create-pkf44" event={"ID":"365fdcd1-ee57-49e4-819f-e8c567c99001","Type":"ContainerStarted","Data":"cc227100a7829517cf693033ba047d7c2f354eabc91a8c42554d815a6d700fd9"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.608536 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-thkrz" event={"ID":"da401e63-0bdb-4057-844e-c5938c5d9a98","Type":"ContainerStarted","Data":"6852ddaf1ad4e87280a4d7ceb5dc1f7240ede7cfa23d915cd6ddc81c6f0654f7"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.610367 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2ffe-account-create-cxs5m" event={"ID":"868262b9-c453-46a0-8885-1ccf13e06e98","Type":"ContainerStarted","Data":"e5416699f35eee57b4d17d6d616c5e6dcaf120f64801456526d1971145c5941f"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.612815 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p9448" event={"ID":"fb9722e1-2a72-4b42-a605-4e5476890d27","Type":"ContainerStarted","Data":"57212092ab60d8fbd3e6b8e34a798f541f1727d567620303a61b3f19d8215cf8"} Nov 25 15:37:59 crc kubenswrapper[4800]: I1125 15:37:59.627109 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-h5lb7" podStartSLOduration=2.627085207 podStartE2EDuration="2.627085207s" podCreationTimestamp="2025-11-25 15:37:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:37:59.619947135 +0000 UTC m=+1240.674355617" watchObservedRunningTime="2025-11-25 15:37:59.627085207 +0000 UTC m=+1240.681493689" Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.629698 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-bcj2h" event={"ID":"68fc1e6f-3787-4d76-9ed2-701a8170a037","Type":"ContainerStarted","Data":"6e07c0061071d76abda106c98f11d021f3e91e6206bf99d831012d7ea89fadce"} Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.636828 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0885-account-create-jgg7v" event={"ID":"78f8b613-903f-45b0-bf62-176546fd4f72","Type":"ContainerStarted","Data":"1efd704f7973837c024f03c126dd4e14e00c8a74d40bf0840bd49fe31d525108"} Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.640221 4800 generic.go:334] "Generic (PLEG): container finished" podID="fb6483f1-4969-48cf-814f-ecdd47c261ec" containerID="b0192bdaf22666e1dd439e8e12b5ff92088a8bbd38dc90a0fefe34c7f11acc18" exitCode=0 Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.640278 4800 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-h5lb7" event={"ID":"fb6483f1-4969-48cf-814f-ecdd47c261ec","Type":"ContainerDied","Data":"b0192bdaf22666e1dd439e8e12b5ff92088a8bbd38dc90a0fefe34c7f11acc18"} Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.643038 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6e3d-account-create-pkf44" event={"ID":"365fdcd1-ee57-49e4-819f-e8c567c99001","Type":"ContainerStarted","Data":"30f971832485fc6ef3b5d63804e2e985284521908da238b4b61ee1433eea5aad"} Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.651308 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-thkrz" event={"ID":"da401e63-0bdb-4057-844e-c5938c5d9a98","Type":"ContainerStarted","Data":"9efae3f313f62828f4f2eacf5d2fc025c4c6e1168df51c56a70b5f3dd1c4bb19"} Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.656403 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-bcj2h" podStartSLOduration=2.656382599 podStartE2EDuration="2.656382599s" podCreationTimestamp="2025-11-25 15:37:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:00.653074726 +0000 UTC m=+1241.707483208" watchObservedRunningTime="2025-11-25 15:38:00.656382599 +0000 UTC m=+1241.710791081" Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.657405 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2ffe-account-create-cxs5m" event={"ID":"868262b9-c453-46a0-8885-1ccf13e06e98","Type":"ContainerStarted","Data":"5cd6f865f54c020b55cdab91d2a1c4e5fb69be0675585cf2034baf9517c73651"} Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.696419 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-6e3d-account-create-pkf44" podStartSLOduration=3.696399856 podStartE2EDuration="3.696399856s" podCreationTimestamp="2025-11-25 15:37:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:00.693111043 +0000 UTC m=+1241.747519535" watchObservedRunningTime="2025-11-25 15:38:00.696399856 +0000 UTC m=+1241.750808338" Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.720560 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-thkrz" podStartSLOduration=3.720535226 podStartE2EDuration="3.720535226s" podCreationTimestamp="2025-11-25 15:37:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:00.713614312 +0000 UTC m=+1241.768022794" watchObservedRunningTime="2025-11-25 15:38:00.720535226 +0000 UTC m=+1241.774943708" Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.739317 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-0885-account-create-jgg7v" podStartSLOduration=3.739287335 podStartE2EDuration="3.739287335s" podCreationTimestamp="2025-11-25 15:37:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:00.727002418 +0000 UTC m=+1241.781410900" watchObservedRunningTime="2025-11-25 15:38:00.739287335 +0000 UTC m=+1241.793695827" Nov 25 15:38:00 crc kubenswrapper[4800]: I1125 15:38:00.753046 4800 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-2ffe-account-create-cxs5m" podStartSLOduration=2.753017571 podStartE2EDuration="2.753017571s" podCreationTimestamp="2025-11-25 15:37:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:00.74159649 +0000 UTC m=+1241.796004972" watchObservedRunningTime="2025-11-25 15:38:00.753017571 +0000 UTC m=+1241.807426053" Nov 25 15:38:01 crc kubenswrapper[4800]: I1125 15:38:01.665730 4800 generic.go:334] "Generic (PLEG): container finished" podID="868262b9-c453-46a0-8885-1ccf13e06e98" containerID="5cd6f865f54c020b55cdab91d2a1c4e5fb69be0675585cf2034baf9517c73651" exitCode=0 Nov 25 15:38:01 crc kubenswrapper[4800]: I1125 15:38:01.665940 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2ffe-account-create-cxs5m" event={"ID":"868262b9-c453-46a0-8885-1ccf13e06e98","Type":"ContainerDied","Data":"5cd6f865f54c020b55cdab91d2a1c4e5fb69be0675585cf2034baf9517c73651"} Nov 25 15:38:01 crc kubenswrapper[4800]: I1125 15:38:01.668708 4800 generic.go:334] "Generic (PLEG): container finished" podID="68fc1e6f-3787-4d76-9ed2-701a8170a037" containerID="6e07c0061071d76abda106c98f11d021f3e91e6206bf99d831012d7ea89fadce" exitCode=0 Nov 25 15:38:01 crc kubenswrapper[4800]: I1125 15:38:01.668747 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-bcj2h" event={"ID":"68fc1e6f-3787-4d76-9ed2-701a8170a037","Type":"ContainerDied","Data":"6e07c0061071d76abda106c98f11d021f3e91e6206bf99d831012d7ea89fadce"} Nov 25 15:38:01 crc kubenswrapper[4800]: I1125 15:38:01.670417 4800 generic.go:334] "Generic (PLEG): container finished" podID="365fdcd1-ee57-49e4-819f-e8c567c99001" containerID="30f971832485fc6ef3b5d63804e2e985284521908da238b4b61ee1433eea5aad" exitCode=0 Nov 25 15:38:01 crc kubenswrapper[4800]: I1125 15:38:01.670513 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6e3d-account-create-pkf44" event={"ID":"365fdcd1-ee57-49e4-819f-e8c567c99001","Type":"ContainerDied","Data":"30f971832485fc6ef3b5d63804e2e985284521908da238b4b61ee1433eea5aad"} Nov 25 15:38:02 crc kubenswrapper[4800]: I1125 15:38:02.681879 4800 generic.go:334] "Generic (PLEG): container finished" podID="da401e63-0bdb-4057-844e-c5938c5d9a98" containerID="9efae3f313f62828f4f2eacf5d2fc025c4c6e1168df51c56a70b5f3dd1c4bb19" exitCode=0 Nov 25 15:38:02 crc kubenswrapper[4800]: I1125 15:38:02.681961 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-thkrz" event={"ID":"da401e63-0bdb-4057-844e-c5938c5d9a98","Type":"ContainerDied","Data":"9efae3f313f62828f4f2eacf5d2fc025c4c6e1168df51c56a70b5f3dd1c4bb19"} Nov 25 15:38:02 crc kubenswrapper[4800]: I1125 15:38:02.684534 4800 generic.go:334] "Generic (PLEG): container finished" podID="78f8b613-903f-45b0-bf62-176546fd4f72" containerID="1efd704f7973837c024f03c126dd4e14e00c8a74d40bf0840bd49fe31d525108" exitCode=0 Nov 25 15:38:02 crc kubenswrapper[4800]: I1125 15:38:02.684654 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0885-account-create-jgg7v" event={"ID":"78f8b613-903f-45b0-bf62-176546fd4f72","Type":"ContainerDied","Data":"1efd704f7973837c024f03c126dd4e14e00c8a74d40bf0840bd49fe31d525108"} Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.876729 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-h5lb7" Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.882285 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.890028 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.905020 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-bcj2h" Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.906224 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7kvq\" (UniqueName: \"kubernetes.io/projected/fb6483f1-4969-48cf-814f-ecdd47c261ec-kube-api-access-w7kvq\") pod \"fb6483f1-4969-48cf-814f-ecdd47c261ec\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.906386 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6483f1-4969-48cf-814f-ecdd47c261ec-operator-scripts\") pod \"fb6483f1-4969-48cf-814f-ecdd47c261ec\" (UID: \"fb6483f1-4969-48cf-814f-ecdd47c261ec\") " Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.907970 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb6483f1-4969-48cf-814f-ecdd47c261ec-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fb6483f1-4969-48cf-814f-ecdd47c261ec" (UID: "fb6483f1-4969-48cf-814f-ecdd47c261ec"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.934611 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb6483f1-4969-48cf-814f-ecdd47c261ec-kube-api-access-w7kvq" (OuterVolumeSpecName: "kube-api-access-w7kvq") pod "fb6483f1-4969-48cf-814f-ecdd47c261ec" (UID: "fb6483f1-4969-48cf-814f-ecdd47c261ec"). InnerVolumeSpecName "kube-api-access-w7kvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.968623 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:38:05 crc kubenswrapper[4800]: I1125 15:38:05.988596 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-thkrz" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.009299 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/868262b9-c453-46a0-8885-1ccf13e06e98-operator-scripts\") pod \"868262b9-c453-46a0-8885-1ccf13e06e98\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.009582 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlzl9\" (UniqueName: \"kubernetes.io/projected/78f8b613-903f-45b0-bf62-176546fd4f72-kube-api-access-tlzl9\") pod \"78f8b613-903f-45b0-bf62-176546fd4f72\" (UID: \"78f8b613-903f-45b0-bf62-176546fd4f72\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.009675 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68fc1e6f-3787-4d76-9ed2-701a8170a037-operator-scripts\") pod \"68fc1e6f-3787-4d76-9ed2-701a8170a037\" (UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010090 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7zqr\" (UniqueName: \"kubernetes.io/projected/868262b9-c453-46a0-8885-1ccf13e06e98-kube-api-access-m7zqr\") pod \"868262b9-c453-46a0-8885-1ccf13e06e98\" (UID: \"868262b9-c453-46a0-8885-1ccf13e06e98\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010173 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68fc1e6f-3787-4d76-9ed2-701a8170a037-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "68fc1e6f-3787-4d76-9ed2-701a8170a037" (UID: "68fc1e6f-3787-4d76-9ed2-701a8170a037"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010176 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/868262b9-c453-46a0-8885-1ccf13e06e98-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "868262b9-c453-46a0-8885-1ccf13e06e98" (UID: "868262b9-c453-46a0-8885-1ccf13e06e98"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010225 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tm8h5\" (UniqueName: \"kubernetes.io/projected/365fdcd1-ee57-49e4-819f-e8c567c99001-kube-api-access-tm8h5\") pod \"365fdcd1-ee57-49e4-819f-e8c567c99001\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010288 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njxfs\" (UniqueName: \"kubernetes.io/projected/68fc1e6f-3787-4d76-9ed2-701a8170a037-kube-api-access-njxfs\") pod \"68fc1e6f-3787-4d76-9ed2-701a8170a037\" (UID: \"68fc1e6f-3787-4d76-9ed2-701a8170a037\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010348 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f8b613-903f-45b0-bf62-176546fd4f72-operator-scripts\") pod \"78f8b613-903f-45b0-bf62-176546fd4f72\" (UID: \"78f8b613-903f-45b0-bf62-176546fd4f72\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010446 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365fdcd1-ee57-49e4-819f-e8c567c99001-operator-scripts\") pod \"365fdcd1-ee57-49e4-819f-e8c567c99001\" (UID: \"365fdcd1-ee57-49e4-819f-e8c567c99001\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.010792 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78f8b613-903f-45b0-bf62-176546fd4f72-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "78f8b613-903f-45b0-bf62-176546fd4f72" (UID: "78f8b613-903f-45b0-bf62-176546fd4f72"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.011087 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/365fdcd1-ee57-49e4-819f-e8c567c99001-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "365fdcd1-ee57-49e4-819f-e8c567c99001" (UID: "365fdcd1-ee57-49e4-819f-e8c567c99001"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.014908 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6483f1-4969-48cf-814f-ecdd47c261ec-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.014964 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f8b613-903f-45b0-bf62-176546fd4f72-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.014983 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7kvq\" (UniqueName: \"kubernetes.io/projected/fb6483f1-4969-48cf-814f-ecdd47c261ec-kube-api-access-w7kvq\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.015004 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365fdcd1-ee57-49e4-819f-e8c567c99001-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.015017 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/868262b9-c453-46a0-8885-1ccf13e06e98-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.015031 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68fc1e6f-3787-4d76-9ed2-701a8170a037-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.017169 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78f8b613-903f-45b0-bf62-176546fd4f72-kube-api-access-tlzl9" (OuterVolumeSpecName: "kube-api-access-tlzl9") pod "78f8b613-903f-45b0-bf62-176546fd4f72" (UID: "78f8b613-903f-45b0-bf62-176546fd4f72"). InnerVolumeSpecName "kube-api-access-tlzl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.017335 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68fc1e6f-3787-4d76-9ed2-701a8170a037-kube-api-access-njxfs" (OuterVolumeSpecName: "kube-api-access-njxfs") pod "68fc1e6f-3787-4d76-9ed2-701a8170a037" (UID: "68fc1e6f-3787-4d76-9ed2-701a8170a037"). InnerVolumeSpecName "kube-api-access-njxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.021089 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/868262b9-c453-46a0-8885-1ccf13e06e98-kube-api-access-m7zqr" (OuterVolumeSpecName: "kube-api-access-m7zqr") pod "868262b9-c453-46a0-8885-1ccf13e06e98" (UID: "868262b9-c453-46a0-8885-1ccf13e06e98"). InnerVolumeSpecName "kube-api-access-m7zqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.028710 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/365fdcd1-ee57-49e4-819f-e8c567c99001-kube-api-access-tm8h5" (OuterVolumeSpecName: "kube-api-access-tm8h5") pod "365fdcd1-ee57-49e4-819f-e8c567c99001" (UID: "365fdcd1-ee57-49e4-819f-e8c567c99001"). InnerVolumeSpecName "kube-api-access-tm8h5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.116681 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da401e63-0bdb-4057-844e-c5938c5d9a98-operator-scripts\") pod \"da401e63-0bdb-4057-844e-c5938c5d9a98\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.116929 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjjlj\" (UniqueName: \"kubernetes.io/projected/da401e63-0bdb-4057-844e-c5938c5d9a98-kube-api-access-jjjlj\") pod \"da401e63-0bdb-4057-844e-c5938c5d9a98\" (UID: \"da401e63-0bdb-4057-844e-c5938c5d9a98\") " Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.117367 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7zqr\" (UniqueName: \"kubernetes.io/projected/868262b9-c453-46a0-8885-1ccf13e06e98-kube-api-access-m7zqr\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.117385 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tm8h5\" (UniqueName: \"kubernetes.io/projected/365fdcd1-ee57-49e4-819f-e8c567c99001-kube-api-access-tm8h5\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.117395 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njxfs\" (UniqueName: \"kubernetes.io/projected/68fc1e6f-3787-4d76-9ed2-701a8170a037-kube-api-access-njxfs\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.117405 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlzl9\" (UniqueName: \"kubernetes.io/projected/78f8b613-903f-45b0-bf62-176546fd4f72-kube-api-access-tlzl9\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.117637 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da401e63-0bdb-4057-844e-c5938c5d9a98-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "da401e63-0bdb-4057-844e-c5938c5d9a98" (UID: "da401e63-0bdb-4057-844e-c5938c5d9a98"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.121311 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da401e63-0bdb-4057-844e-c5938c5d9a98-kube-api-access-jjjlj" (OuterVolumeSpecName: "kube-api-access-jjjlj") pod "da401e63-0bdb-4057-844e-c5938c5d9a98" (UID: "da401e63-0bdb-4057-844e-c5938c5d9a98"). InnerVolumeSpecName "kube-api-access-jjjlj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.219306 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjjlj\" (UniqueName: \"kubernetes.io/projected/da401e63-0bdb-4057-844e-c5938c5d9a98-kube-api-access-jjjlj\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.219353 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da401e63-0bdb-4057-844e-c5938c5d9a98-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.726884 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-thkrz" event={"ID":"da401e63-0bdb-4057-844e-c5938c5d9a98","Type":"ContainerDied","Data":"6852ddaf1ad4e87280a4d7ceb5dc1f7240ede7cfa23d915cd6ddc81c6f0654f7"} Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.727466 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6852ddaf1ad4e87280a4d7ceb5dc1f7240ede7cfa23d915cd6ddc81c6f0654f7" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.726965 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-thkrz" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.729127 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2ffe-account-create-cxs5m" event={"ID":"868262b9-c453-46a0-8885-1ccf13e06e98","Type":"ContainerDied","Data":"e5416699f35eee57b4d17d6d616c5e6dcaf120f64801456526d1971145c5941f"} Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.729178 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5416699f35eee57b4d17d6d616c5e6dcaf120f64801456526d1971145c5941f" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.729257 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ffe-account-create-cxs5m" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.733608 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p9448" event={"ID":"fb9722e1-2a72-4b42-a605-4e5476890d27","Type":"ContainerStarted","Data":"e54f64b475f6328a1c93e9a91914b6b6980df6c41dd5fb71f5da7f04d7d04191"} Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.737040 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-bcj2h" event={"ID":"68fc1e6f-3787-4d76-9ed2-701a8170a037","Type":"ContainerDied","Data":"13925ae164f36b9e848e1667cdcb3c7cc670f0fa113802fb1aa2a46f381d319a"} Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.737150 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13925ae164f36b9e848e1667cdcb3c7cc670f0fa113802fb1aa2a46f381d319a" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.737376 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-bcj2h" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.742818 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0885-account-create-jgg7v" event={"ID":"78f8b613-903f-45b0-bf62-176546fd4f72","Type":"ContainerDied","Data":"004339295a1ec9ece749574808318879e39c0651c7914c5d5369f50c440a576f"} Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.742882 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="004339295a1ec9ece749574808318879e39c0651c7914c5d5369f50c440a576f" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.742934 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0885-account-create-jgg7v" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.745727 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-h5lb7" event={"ID":"fb6483f1-4969-48cf-814f-ecdd47c261ec","Type":"ContainerDied","Data":"b4302bef2ecc01bbca8cec6f2ab8e5dd06e3b439e6511a08fc3a641d337d425b"} Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.745942 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-h5lb7" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.746401 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4302bef2ecc01bbca8cec6f2ab8e5dd06e3b439e6511a08fc3a641d337d425b" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.747420 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6e3d-account-create-pkf44" event={"ID":"365fdcd1-ee57-49e4-819f-e8c567c99001","Type":"ContainerDied","Data":"cc227100a7829517cf693033ba047d7c2f354eabc91a8c42554d815a6d700fd9"} Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.747449 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc227100a7829517cf693033ba047d7c2f354eabc91a8c42554d815a6d700fd9" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.747514 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6e3d-account-create-pkf44" Nov 25 15:38:06 crc kubenswrapper[4800]: I1125 15:38:06.758093 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-p9448" podStartSLOduration=1.970911671 podStartE2EDuration="8.758070061s" podCreationTimestamp="2025-11-25 15:37:58 +0000 UTC" firstStartedPulling="2025-11-25 15:37:59.286213015 +0000 UTC m=+1240.340621497" lastFinishedPulling="2025-11-25 15:38:06.073371405 +0000 UTC m=+1247.127779887" observedRunningTime="2025-11-25 15:38:06.753784931 +0000 UTC m=+1247.808193413" watchObservedRunningTime="2025-11-25 15:38:06.758070061 +0000 UTC m=+1247.812478553" Nov 25 15:38:08 crc kubenswrapper[4800]: E1125 15:38:08.411340 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2a07cf0_e668_43c3_bc9a_8594243f1d02.slice/crio-2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58\": RecentStats: unable to find data in memory cache]" Nov 25 15:38:12 crc kubenswrapper[4800]: I1125 15:38:12.640120 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:38:12 crc kubenswrapper[4800]: I1125 15:38:12.640734 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:38:12 crc kubenswrapper[4800]: I1125 15:38:12.640798 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:38:12 crc kubenswrapper[4800]: I1125 15:38:12.641704 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d97d2c0b8a05e269074c76cf21138a3aeaeac0cd9bbe1be26dcd5369887e11f6"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:38:12 crc kubenswrapper[4800]: I1125 15:38:12.641771 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://d97d2c0b8a05e269074c76cf21138a3aeaeac0cd9bbe1be26dcd5369887e11f6" gracePeriod=600 Nov 25 15:38:12 crc kubenswrapper[4800]: I1125 15:38:12.820276 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fxh9k" event={"ID":"861a549f-5373-4d45-befd-3859dbfdc705","Type":"ContainerStarted","Data":"e504eafaa9813815df1bd62428bfb4932db7b3b16b2bbcc17fd995833ca13b55"} Nov 25 15:38:12 crc kubenswrapper[4800]: I1125 15:38:12.846451 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-fxh9k" podStartSLOduration=3.121585102 podStartE2EDuration="32.846427606s" podCreationTimestamp="2025-11-25 15:37:40 +0000 UTC" firstStartedPulling="2025-11-25 15:37:41.899473497 +0000 UTC 
m=+1222.953881979" lastFinishedPulling="2025-11-25 15:38:11.624315991 +0000 UTC m=+1252.678724483" observedRunningTime="2025-11-25 15:38:12.843916425 +0000 UTC m=+1253.898324947" watchObservedRunningTime="2025-11-25 15:38:12.846427606 +0000 UTC m=+1253.900836088" Nov 25 15:38:13 crc kubenswrapper[4800]: I1125 15:38:13.834348 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="d97d2c0b8a05e269074c76cf21138a3aeaeac0cd9bbe1be26dcd5369887e11f6" exitCode=0 Nov 25 15:38:13 crc kubenswrapper[4800]: I1125 15:38:13.834468 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"d97d2c0b8a05e269074c76cf21138a3aeaeac0cd9bbe1be26dcd5369887e11f6"} Nov 25 15:38:13 crc kubenswrapper[4800]: I1125 15:38:13.836019 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"b8afc7cca40a5009587f2c6768805585b09b1bfca0b79d34753356c624725482"} Nov 25 15:38:13 crc kubenswrapper[4800]: I1125 15:38:13.836068 4800 scope.go:117] "RemoveContainer" containerID="1a9b7db7d78c7762803114dfba2c97d5027abe1ed7fd4f553dedba984708c24e" Nov 25 15:38:18 crc kubenswrapper[4800]: E1125 15:38:18.652551 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2a07cf0_e668_43c3_bc9a_8594243f1d02.slice/crio-2836fd8e5cf597edcb7edd19096fe074494e9c09ebe144346baf7e9428e7ee58\": RecentStats: unable to find data in memory cache]" Nov 25 15:38:31 crc kubenswrapper[4800]: I1125 15:38:31.017745 4800 generic.go:334] "Generic (PLEG): container finished" podID="fb9722e1-2a72-4b42-a605-4e5476890d27" containerID="e54f64b475f6328a1c93e9a91914b6b6980df6c41dd5fb71f5da7f04d7d04191" exitCode=0 Nov 25 15:38:31 crc kubenswrapper[4800]: I1125 15:38:31.017829 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p9448" event={"ID":"fb9722e1-2a72-4b42-a605-4e5476890d27","Type":"ContainerDied","Data":"e54f64b475f6328a1c93e9a91914b6b6980df6c41dd5fb71f5da7f04d7d04191"} Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.383516 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-p9448" Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.539835 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-combined-ca-bundle\") pod \"fb9722e1-2a72-4b42-a605-4e5476890d27\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.539946 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-config-data\") pod \"fb9722e1-2a72-4b42-a605-4e5476890d27\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.540076 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbnv7\" (UniqueName: \"kubernetes.io/projected/fb9722e1-2a72-4b42-a605-4e5476890d27-kube-api-access-wbnv7\") pod \"fb9722e1-2a72-4b42-a605-4e5476890d27\" (UID: \"fb9722e1-2a72-4b42-a605-4e5476890d27\") " Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.546502 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb9722e1-2a72-4b42-a605-4e5476890d27-kube-api-access-wbnv7" (OuterVolumeSpecName: "kube-api-access-wbnv7") pod "fb9722e1-2a72-4b42-a605-4e5476890d27" (UID: "fb9722e1-2a72-4b42-a605-4e5476890d27"). InnerVolumeSpecName "kube-api-access-wbnv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.564241 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb9722e1-2a72-4b42-a605-4e5476890d27" (UID: "fb9722e1-2a72-4b42-a605-4e5476890d27"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.583067 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-config-data" (OuterVolumeSpecName: "config-data") pod "fb9722e1-2a72-4b42-a605-4e5476890d27" (UID: "fb9722e1-2a72-4b42-a605-4e5476890d27"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.643082 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.643141 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9722e1-2a72-4b42-a605-4e5476890d27-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:32 crc kubenswrapper[4800]: I1125 15:38:32.643152 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbnv7\" (UniqueName: \"kubernetes.io/projected/fb9722e1-2a72-4b42-a605-4e5476890d27-kube-api-access-wbnv7\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.039102 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p9448" event={"ID":"fb9722e1-2a72-4b42-a605-4e5476890d27","Type":"ContainerDied","Data":"57212092ab60d8fbd3e6b8e34a798f541f1727d567620303a61b3f19d8215cf8"} Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.039629 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57212092ab60d8fbd3e6b8e34a798f541f1727d567620303a61b3f19d8215cf8" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.039181 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-p9448" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.333583 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85959f8897-glhss"] Nov 25 15:38:33 crc kubenswrapper[4800]: E1125 15:38:33.334001 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9722e1-2a72-4b42-a605-4e5476890d27" containerName="keystone-db-sync" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334021 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9722e1-2a72-4b42-a605-4e5476890d27" containerName="keystone-db-sync" Nov 25 15:38:33 crc kubenswrapper[4800]: E1125 15:38:33.334039 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="365fdcd1-ee57-49e4-819f-e8c567c99001" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334046 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="365fdcd1-ee57-49e4-819f-e8c567c99001" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: E1125 15:38:33.334078 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="868262b9-c453-46a0-8885-1ccf13e06e98" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334084 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="868262b9-c453-46a0-8885-1ccf13e06e98" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: E1125 15:38:33.334093 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb6483f1-4969-48cf-814f-ecdd47c261ec" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334099 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb6483f1-4969-48cf-814f-ecdd47c261ec" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: E1125 15:38:33.334109 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78f8b613-903f-45b0-bf62-176546fd4f72" 
containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334114 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="78f8b613-903f-45b0-bf62-176546fd4f72" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: E1125 15:38:33.334126 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da401e63-0bdb-4057-844e-c5938c5d9a98" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334134 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="da401e63-0bdb-4057-844e-c5938c5d9a98" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: E1125 15:38:33.334144 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68fc1e6f-3787-4d76-9ed2-701a8170a037" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334152 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="68fc1e6f-3787-4d76-9ed2-701a8170a037" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334316 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="da401e63-0bdb-4057-844e-c5938c5d9a98" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334331 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="78f8b613-903f-45b0-bf62-176546fd4f72" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334340 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="868262b9-c453-46a0-8885-1ccf13e06e98" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334352 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="68fc1e6f-3787-4d76-9ed2-701a8170a037" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334358 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9722e1-2a72-4b42-a605-4e5476890d27" containerName="keystone-db-sync" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334380 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb6483f1-4969-48cf-814f-ecdd47c261ec" containerName="mariadb-database-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.334390 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="365fdcd1-ee57-49e4-819f-e8c567c99001" containerName="mariadb-account-create" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.335341 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.345735 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-n4hqc"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.347303 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.355516 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.355744 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.355957 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.356264 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-p5fqm" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.356596 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.376271 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85959f8897-glhss"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.399710 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-n4hqc"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460276 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-nb\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460337 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-config-data\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460364 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-dns-svc\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460381 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-credential-keys\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460401 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxdk6\" (UniqueName: \"kubernetes.io/projected/34d241a1-0cf3-4b95-933c-4d05d3d98929-kube-api-access-wxdk6\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460426 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-config\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: 
\"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460445 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-scripts\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460465 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-combined-ca-bundle\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460492 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-sb\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460522 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-fernet-keys\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.460561 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hn726\" (UniqueName: \"kubernetes.io/projected/6d47c54e-ce18-4454-944a-d2cf0df370a8-kube-api-access-hn726\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566220 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hn726\" (UniqueName: \"kubernetes.io/projected/6d47c54e-ce18-4454-944a-d2cf0df370a8-kube-api-access-hn726\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566278 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-nb\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566308 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-config-data\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566328 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-dns-svc\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: 
\"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566345 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-credential-keys\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566366 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxdk6\" (UniqueName: \"kubernetes.io/projected/34d241a1-0cf3-4b95-933c-4d05d3d98929-kube-api-access-wxdk6\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566391 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-config\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566407 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-scripts\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566426 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-combined-ca-bundle\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566454 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-sb\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.566483 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-fernet-keys\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.568367 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-nb\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.572812 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-54cdc54fcf-c45jx"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.574865 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-dns-svc\") pod 
\"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.575417 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-sb\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.576212 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-config\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.577594 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.585785 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-scripts\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.586257 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.586655 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-config-data\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.589649 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-credential-keys\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.590700 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.590908 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.592598 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-combined-ca-bundle\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.596205 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-fernet-keys\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.609220 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-kr8gm" Nov 25 
15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.634576 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxdk6\" (UniqueName: \"kubernetes.io/projected/34d241a1-0cf3-4b95-933c-4d05d3d98929-kube-api-access-wxdk6\") pod \"dnsmasq-dns-85959f8897-glhss\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.638694 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hn726\" (UniqueName: \"kubernetes.io/projected/6d47c54e-ce18-4454-944a-d2cf0df370a8-kube-api-access-hn726\") pod \"keystone-bootstrap-n4hqc\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.659124 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-54cdc54fcf-c45jx"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.661049 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.689340 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.747929 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-2d48v"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.749271 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.754577 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.764744 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.765092 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8pz6v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.766275 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-2d48v"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.780915 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-7w7c7"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.782178 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.801234 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.801550 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-xnnhl" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.812533 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-combined-ca-bundle\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.812710 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-config-data\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.812859 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-db-sync-config-data\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.812950 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fjlr\" (UniqueName: \"kubernetes.io/projected/15830745-aef8-4482-8885-6a5969795af6-kube-api-access-9fjlr\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813060 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3674fdb-30d8-402d-b9a7-419574d7a0c9-logs\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813150 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15830745-aef8-4482-8885-6a5969795af6-etc-machine-id\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813249 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3674fdb-30d8-402d-b9a7-419574d7a0c9-horizon-secret-key\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813334 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-combined-ca-bundle\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " 
pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813418 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-scripts\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813561 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-scripts\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813645 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-config-data\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813725 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff2s2\" (UniqueName: \"kubernetes.io/projected/8e726809-c215-4d1a-95a3-d0fadede3cca-kube-api-access-ff2s2\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813807 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-scripts\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.813908 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e726809-c215-4d1a-95a3-d0fadede3cca-logs\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.827576 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqkd9\" (UniqueName: \"kubernetes.io/projected/d3674fdb-30d8-402d-b9a7-419574d7a0c9-kube-api-access-fqkd9\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.827723 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-config-data\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.825894 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.848069 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-7w7c7"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 
15:38:33.903993 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-rq7kb"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.905225 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.914072 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-frhp6" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.914335 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.914889 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.931812 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-shvrf"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.933051 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.934827 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-config-data\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.934903 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm78s\" (UniqueName: \"kubernetes.io/projected/28782c8c-88d7-48d6-bd10-3b64cff49706-kube-api-access-hm78s\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.934932 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-db-sync-config-data\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.934958 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fjlr\" (UniqueName: \"kubernetes.io/projected/15830745-aef8-4482-8885-6a5969795af6-kube-api-access-9fjlr\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.934984 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3674fdb-30d8-402d-b9a7-419574d7a0c9-logs\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935005 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15830745-aef8-4482-8885-6a5969795af6-etc-machine-id\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935032 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-config\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935057 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3674fdb-30d8-402d-b9a7-419574d7a0c9-horizon-secret-key\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935077 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-combined-ca-bundle\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935099 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-scripts\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935137 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-scripts\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935156 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-config-data\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935175 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff2s2\" (UniqueName: \"kubernetes.io/projected/8e726809-c215-4d1a-95a3-d0fadede3cca-kube-api-access-ff2s2\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935194 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-scripts\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935215 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e726809-c215-4d1a-95a3-d0fadede3cca-logs\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935239 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqkd9\" (UniqueName: \"kubernetes.io/projected/d3674fdb-30d8-402d-b9a7-419574d7a0c9-kube-api-access-fqkd9\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " 
pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935260 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-config-data\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935287 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-combined-ca-bundle\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.935323 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-combined-ca-bundle\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.941580 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.941917 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-jcx75" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.942286 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15830745-aef8-4482-8885-6a5969795af6-etc-machine-id\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.942684 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3674fdb-30d8-402d-b9a7-419574d7a0c9-logs\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.944750 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e726809-c215-4d1a-95a3-d0fadede3cca-logs\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.945432 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-scripts\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.946322 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-config-data\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.966936 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rq7kb"] Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.982878 
4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-scripts\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.983575 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-combined-ca-bundle\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.983796 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-combined-ca-bundle\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.984072 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-config-data\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.986200 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3674fdb-30d8-402d-b9a7-419574d7a0c9-horizon-secret-key\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.987216 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-db-sync-config-data\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.990337 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-config-data\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.990699 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqkd9\" (UniqueName: \"kubernetes.io/projected/d3674fdb-30d8-402d-b9a7-419574d7a0c9-kube-api-access-fqkd9\") pod \"horizon-54cdc54fcf-c45jx\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.990822 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff2s2\" (UniqueName: \"kubernetes.io/projected/8e726809-c215-4d1a-95a3-d0fadede3cca-kube-api-access-ff2s2\") pod \"placement-db-sync-7w7c7\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.992620 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-scripts\") pod \"cinder-db-sync-2d48v\" (UID: 
\"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:33 crc kubenswrapper[4800]: I1125 15:38:33.998720 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-shvrf"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.025298 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85959f8897-glhss"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.057365 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fjlr\" (UniqueName: \"kubernetes.io/projected/15830745-aef8-4482-8885-6a5969795af6-kube-api-access-9fjlr\") pod \"cinder-db-sync-2d48v\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.060721 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-combined-ca-bundle\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.060812 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-combined-ca-bundle\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.060903 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm78s\" (UniqueName: \"kubernetes.io/projected/28782c8c-88d7-48d6-bd10-3b64cff49706-kube-api-access-hm78s\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.060976 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-config\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.061020 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67jvq\" (UniqueName: \"kubernetes.io/projected/b030f9b2-f92c-40d4-b92a-7c99d4af8358-kube-api-access-67jvq\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.061389 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-db-sync-config-data\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.074878 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-config\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.077717 4800 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.099303 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-combined-ca-bundle\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.117005 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm78s\" (UniqueName: \"kubernetes.io/projected/28782c8c-88d7-48d6-bd10-3b64cff49706-kube-api-access-hm78s\") pod \"neutron-db-sync-rq7kb\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.126225 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.145436 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.156166 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.153423 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.166077 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-combined-ca-bundle\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.166313 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67jvq\" (UniqueName: \"kubernetes.io/projected/b030f9b2-f92c-40d4-b92a-7c99d4af8358-kube-api-access-67jvq\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.166420 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-db-sync-config-data\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.170447 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-db-sync-config-data\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.181996 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-combined-ca-bundle\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.192598 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-67jvq\" (UniqueName: \"kubernetes.io/projected/b030f9b2-f92c-40d4-b92a-7c99d4af8358-kube-api-access-67jvq\") pod \"barbican-db-sync-shvrf\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.194955 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-2d48v" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.220377 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-7w7c7" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.221670 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.245943 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-558cd77c67-jcqhf"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.248113 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.256024 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-558cd77c67-jcqhf"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.264391 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.270440 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5497646597-g5gld"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.272648 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.273347 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-scripts\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.273482 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.273519 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.273562 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-log-httpd\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.273585 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-run-httpd\") pod \"ceilometer-0\" (UID: 
\"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.273623 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkfsl\" (UniqueName: \"kubernetes.io/projected/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-kube-api-access-dkfsl\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.273820 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-config-data\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.284168 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5497646597-g5gld"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.286698 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-shvrf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375193 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-config-data\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375287 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmjd8\" (UniqueName: \"kubernetes.io/projected/2278309e-6b47-4375-b433-55a0c80ef751-kube-api-access-gmjd8\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375323 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-sb\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375342 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-nb\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375380 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-scripts\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375402 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-dns-svc\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375423 
4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bdf98904-e8fc-4c69-9dc7-5e522c269236-horizon-secret-key\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375442 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf98904-e8fc-4c69-9dc7-5e522c269236-logs\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375462 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375482 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5pk4\" (UniqueName: \"kubernetes.io/projected/bdf98904-e8fc-4c69-9dc7-5e522c269236-kube-api-access-w5pk4\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375501 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375533 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-config\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.375602 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-log-httpd\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.376181 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-log-httpd\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.376238 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-run-httpd\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.376645 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-run-httpd\") pod \"ceilometer-0\" (UID: 
\"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.376681 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-config-data\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.376705 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-scripts\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.376731 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkfsl\" (UniqueName: \"kubernetes.io/projected/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-kube-api-access-dkfsl\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.392341 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.395814 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-scripts\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.400530 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.400824 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-config-data\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.403991 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkfsl\" (UniqueName: \"kubernetes.io/projected/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-kube-api-access-dkfsl\") pod \"ceilometer-0\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.474352 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478520 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmjd8\" (UniqueName: \"kubernetes.io/projected/2278309e-6b47-4375-b433-55a0c80ef751-kube-api-access-gmjd8\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478589 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-nb\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478608 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-sb\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478660 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-dns-svc\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478689 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bdf98904-e8fc-4c69-9dc7-5e522c269236-horizon-secret-key\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478715 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf98904-e8fc-4c69-9dc7-5e522c269236-logs\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478737 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5pk4\" (UniqueName: \"kubernetes.io/projected/bdf98904-e8fc-4c69-9dc7-5e522c269236-kube-api-access-w5pk4\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478771 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-config\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.478798 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-config-data\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 
15:38:34.478820 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-scripts\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.480090 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-scripts\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.481264 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf98904-e8fc-4c69-9dc7-5e522c269236-logs\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.481333 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-nb\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.481695 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-config\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.482178 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-sb\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.482600 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-config-data\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.484257 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-dns-svc\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.489348 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bdf98904-e8fc-4c69-9dc7-5e522c269236-horizon-secret-key\") pod \"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.499780 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5pk4\" (UniqueName: \"kubernetes.io/projected/bdf98904-e8fc-4c69-9dc7-5e522c269236-kube-api-access-w5pk4\") pod 
\"horizon-5497646597-g5gld\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.500299 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmjd8\" (UniqueName: \"kubernetes.io/projected/2278309e-6b47-4375-b433-55a0c80ef751-kube-api-access-gmjd8\") pod \"dnsmasq-dns-558cd77c67-jcqhf\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.541757 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-n4hqc"] Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.574438 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.596039 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5497646597-g5gld" Nov 25 15:38:34 crc kubenswrapper[4800]: I1125 15:38:34.659299 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85959f8897-glhss"] Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.082401 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-54cdc54fcf-c45jx"] Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.111045 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rq7kb"] Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.120773 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-7w7c7"] Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.127901 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-2d48v"] Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.133258 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rq7kb" event={"ID":"28782c8c-88d7-48d6-bd10-3b64cff49706","Type":"ContainerStarted","Data":"8127188c31016c69be415f2f79c24a032c3652050abfe5a147183ed5bc468fb4"} Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.134214 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54cdc54fcf-c45jx" event={"ID":"d3674fdb-30d8-402d-b9a7-419574d7a0c9","Type":"ContainerStarted","Data":"2ee82f714777795118ed874f21573e5107a2d50fc7c5bfd8cefc7e74a83c2293"} Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.142088 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85959f8897-glhss" event={"ID":"34d241a1-0cf3-4b95-933c-4d05d3d98929","Type":"ContainerStarted","Data":"5bcdfa05f5d430db2c9b1450b827c3b3aefcc49776e189ac9c3509470e1b4b55"} Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.142142 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85959f8897-glhss" event={"ID":"34d241a1-0cf3-4b95-933c-4d05d3d98929","Type":"ContainerStarted","Data":"6cd1e6e342e73570def5721cdd7e9793b34876b8fcd811b72e96561b788d2628"} Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.143873 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-7w7c7" event={"ID":"8e726809-c215-4d1a-95a3-d0fadede3cca","Type":"ContainerStarted","Data":"126e343945a386573f7a985814f180bc379c483865c325a1d87fbca3fd91cda2"} Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.152401 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-shvrf"] 
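
[Annotation] The entries above all follow the kubelet volume manager's two-phase pattern: for each volume a pod needs, the reconciler first logs "VerifyControllerAttachedVolume started" (reconciler_common.go:245), then "MountVolume started" (reconciler_common.go:218), and the operation generator finally reports "MountVolume.SetUp succeeded" (operation_generator.go:637) once the secret, configmap, projected, empty-dir, or host-path content is materialized on the node. A minimal sketch of that desired-state reconcile loop follows; the types and names are invented for this note and this is not kubelet source, just the shape of the flow the log shows.

    // sketch.go — illustrative only; mirrors the verify -> mount -> SetUp
    // sequence logged above, with invented types.
    package main

    import "fmt"

    // desiredVolume is a hypothetical stand-in for one entry in the kubelet's
    // desired state of world: a volume that one pod needs mounted.
    type desiredVolume struct {
        name    string // e.g. "combined-ca-bundle"
        plugin  string // e.g. "kubernetes.io/secret"
        podUID  string
        podName string
    }

    // reconcile drives every not-yet-mounted volume through the two phases:
    // verification that the volume is available, then the SetUp that would
    // write it onto the node's filesystem.
    func reconcile(desired []desiredVolume, mounted map[string]bool) {
        for _, v := range desired {
            key := v.podUID + "/" + v.name
            if mounted[key] {
                continue // already in the actual state of world; nothing to log
            }
            fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod %q\n", v.name, v.podName)
            fmt.Printf("MountVolume started for volume %q pod %q\n", v.name, v.podName)
            mounted[key] = true // stand-in for the plugin's SetUp doing real work
            fmt.Printf("MountVolume.SetUp succeeded for volume %q (plugin %s)\n", v.name, v.plugin)
        }
    }

    func main() {
        // Two volumes taken from the barbican-db-sync-shvrf entries above.
        desired := []desiredVolume{
            {"combined-ca-bundle", "kubernetes.io/secret", "b030f9b2-f92c-40d4-b92a-7c99d4af8358", "barbican-db-sync-shvrf"},
            {"kube-api-access-67jvq", "kubernetes.io/projected", "b030f9b2-f92c-40d4-b92a-7c99d4af8358", "barbican-db-sync-shvrf"},
        }
        reconcile(desired, map[string]bool{})
    }

Once every volume reports SetUp succeeded, the pod can proceed to sandbox creation, which is why the "No sandbox for pod can be found. Need to start a new one" entries interleave with the mount traffic above.
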
Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.178973 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n4hqc" event={"ID":"6d47c54e-ce18-4454-944a-d2cf0df370a8","Type":"ContainerStarted","Data":"4841443f209fa509e5de46b7e1e5425f75fb687ac606a5fc0b1d36c07dab811f"} Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.179018 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n4hqc" event={"ID":"6d47c54e-ce18-4454-944a-d2cf0df370a8","Type":"ContainerStarted","Data":"d082809fcd4b3cec8347ce54805f6dc25b3028ffeb10185b0ee0ee25dde82143"} Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.200174 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-n4hqc" podStartSLOduration=2.200144496 podStartE2EDuration="2.200144496s" podCreationTimestamp="2025-11-25 15:38:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:35.198325915 +0000 UTC m=+1276.252734397" watchObservedRunningTime="2025-11-25 15:38:35.200144496 +0000 UTC m=+1276.254552998" Nov 25 15:38:35 crc kubenswrapper[4800]: W1125 15:38:35.250240 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-e50bb773dfba6651cad6a6fefc1f7afec1dbadd8ac252ffad5b2638fec8c882b WatchSource:0}: Error finding container e50bb773dfba6651cad6a6fefc1f7afec1dbadd8ac252ffad5b2638fec8c882b: Status 404 returned error can't find the container with id e50bb773dfba6651cad6a6fefc1f7afec1dbadd8ac252ffad5b2638fec8c882b Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.257953 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.361599 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-558cd77c67-jcqhf"] Nov 25 15:38:35 crc kubenswrapper[4800]: I1125 15:38:35.372345 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5497646597-g5gld"] Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.204698 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-shvrf" event={"ID":"b030f9b2-f92c-40d4-b92a-7c99d4af8358","Type":"ContainerStarted","Data":"e345380c05e09fd867477710bdb4127ff67f1234eb92d68a38b966b7c9943b0d"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.211225 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rq7kb" event={"ID":"28782c8c-88d7-48d6-bd10-3b64cff49706","Type":"ContainerStarted","Data":"0eefb7b7fc795dec6cd9666544ce5a4b72c5db4487024f3254aa19e1f2bcbfb8"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.217855 4800 generic.go:334] "Generic (PLEG): container finished" podID="2278309e-6b47-4375-b433-55a0c80ef751" containerID="f672a7685b2cda3a31dc2c11becc300404c3ad83504d7221692a52ca0723354a" exitCode=0 Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.217960 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" event={"ID":"2278309e-6b47-4375-b433-55a0c80ef751","Type":"ContainerDied","Data":"f672a7685b2cda3a31dc2c11becc300404c3ad83504d7221692a52ca0723354a"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.218197 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" 
event={"ID":"2278309e-6b47-4375-b433-55a0c80ef751","Type":"ContainerStarted","Data":"99d2400df918ce28162eede650d7e219f8a0bb7608409c87fb601820dff73a17"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.220642 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5497646597-g5gld" event={"ID":"bdf98904-e8fc-4c69-9dc7-5e522c269236","Type":"ContainerStarted","Data":"ef30c8afd5c52c8c5ee0135e8e54af6e108e0ba3c1aa84b4e8596b1152b4dd1f"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.222624 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2d48v" event={"ID":"15830745-aef8-4482-8885-6a5969795af6","Type":"ContainerStarted","Data":"a65000abd7aa2634e232fc030fdd92b55d0c49b5c047748227e6a156fe3b6385"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.225892 4800 generic.go:334] "Generic (PLEG): container finished" podID="34d241a1-0cf3-4b95-933c-4d05d3d98929" containerID="5bcdfa05f5d430db2c9b1450b827c3b3aefcc49776e189ac9c3509470e1b4b55" exitCode=0 Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.226075 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85959f8897-glhss" event={"ID":"34d241a1-0cf3-4b95-933c-4d05d3d98929","Type":"ContainerDied","Data":"5bcdfa05f5d430db2c9b1450b827c3b3aefcc49776e189ac9c3509470e1b4b55"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.234394 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerStarted","Data":"e50bb773dfba6651cad6a6fefc1f7afec1dbadd8ac252ffad5b2638fec8c882b"} Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.240766 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-rq7kb" podStartSLOduration=3.240734638 podStartE2EDuration="3.240734638s" podCreationTimestamp="2025-11-25 15:38:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:36.23481377 +0000 UTC m=+1277.289222252" watchObservedRunningTime="2025-11-25 15:38:36.240734638 +0000 UTC m=+1277.295143120" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.455303 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-54cdc54fcf-c45jx"] Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.515862 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.552095 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-78665cb57c-77xrd"] Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.553943 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.563173 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78665cb57c-77xrd"] Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.649480 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcw4c\" (UniqueName: \"kubernetes.io/projected/c2f9ee92-fddb-49cf-bb5c-de3435545b92-kube-api-access-pcw4c\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.649584 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-scripts\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.649629 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2f9ee92-fddb-49cf-bb5c-de3435545b92-horizon-secret-key\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.649672 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2f9ee92-fddb-49cf-bb5c-de3435545b92-logs\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.649763 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-config-data\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.751468 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcw4c\" (UniqueName: \"kubernetes.io/projected/c2f9ee92-fddb-49cf-bb5c-de3435545b92-kube-api-access-pcw4c\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.751985 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-scripts\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.752035 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2f9ee92-fddb-49cf-bb5c-de3435545b92-horizon-secret-key\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.752088 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/c2f9ee92-fddb-49cf-bb5c-de3435545b92-logs\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.752128 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-config-data\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.753386 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2f9ee92-fddb-49cf-bb5c-de3435545b92-logs\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.754585 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-config-data\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.755677 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-scripts\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.758570 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2f9ee92-fddb-49cf-bb5c-de3435545b92-horizon-secret-key\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.777831 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcw4c\" (UniqueName: \"kubernetes.io/projected/c2f9ee92-fddb-49cf-bb5c-de3435545b92-kube-api-access-pcw4c\") pod \"horizon-78665cb57c-77xrd\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.876033 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.910970 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.955669 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-nb\") pod \"34d241a1-0cf3-4b95-933c-4d05d3d98929\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.955893 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-sb\") pod \"34d241a1-0cf3-4b95-933c-4d05d3d98929\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.955975 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-config\") pod \"34d241a1-0cf3-4b95-933c-4d05d3d98929\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.956038 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxdk6\" (UniqueName: \"kubernetes.io/projected/34d241a1-0cf3-4b95-933c-4d05d3d98929-kube-api-access-wxdk6\") pod \"34d241a1-0cf3-4b95-933c-4d05d3d98929\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.956071 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-dns-svc\") pod \"34d241a1-0cf3-4b95-933c-4d05d3d98929\" (UID: \"34d241a1-0cf3-4b95-933c-4d05d3d98929\") " Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.983339 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34d241a1-0cf3-4b95-933c-4d05d3d98929-kube-api-access-wxdk6" (OuterVolumeSpecName: "kube-api-access-wxdk6") pod "34d241a1-0cf3-4b95-933c-4d05d3d98929" (UID: "34d241a1-0cf3-4b95-933c-4d05d3d98929"). InnerVolumeSpecName "kube-api-access-wxdk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.984754 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "34d241a1-0cf3-4b95-933c-4d05d3d98929" (UID: "34d241a1-0cf3-4b95-933c-4d05d3d98929"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.990828 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "34d241a1-0cf3-4b95-933c-4d05d3d98929" (UID: "34d241a1-0cf3-4b95-933c-4d05d3d98929"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:36 crc kubenswrapper[4800]: I1125 15:38:36.993453 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "34d241a1-0cf3-4b95-933c-4d05d3d98929" (UID: "34d241a1-0cf3-4b95-933c-4d05d3d98929"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.014104 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-config" (OuterVolumeSpecName: "config") pod "34d241a1-0cf3-4b95-933c-4d05d3d98929" (UID: "34d241a1-0cf3-4b95-933c-4d05d3d98929"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.067324 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.067355 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.067368 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxdk6\" (UniqueName: \"kubernetes.io/projected/34d241a1-0cf3-4b95-933c-4d05d3d98929-kube-api-access-wxdk6\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.067378 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.067387 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d241a1-0cf3-4b95-933c-4d05d3d98929-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.264569 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" event={"ID":"2278309e-6b47-4375-b433-55a0c80ef751","Type":"ContainerStarted","Data":"acd0b4ebc8a0b83be0223d88fc94eec701c5104bbfffc21a72c1d40dde269d66"} Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.265688 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.267297 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85959f8897-glhss" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.267516 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85959f8897-glhss" event={"ID":"34d241a1-0cf3-4b95-933c-4d05d3d98929","Type":"ContainerDied","Data":"6cd1e6e342e73570def5721cdd7e9793b34876b8fcd811b72e96561b788d2628"} Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.267592 4800 scope.go:117] "RemoveContainer" containerID="5bcdfa05f5d430db2c9b1450b827c3b3aefcc49776e189ac9c3509470e1b4b55" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.275248 4800 generic.go:334] "Generic (PLEG): container finished" podID="861a549f-5373-4d45-befd-3859dbfdc705" containerID="e504eafaa9813815df1bd62428bfb4932db7b3b16b2bbcc17fd995833ca13b55" exitCode=0 Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.275832 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fxh9k" event={"ID":"861a549f-5373-4d45-befd-3859dbfdc705","Type":"ContainerDied","Data":"e504eafaa9813815df1bd62428bfb4932db7b3b16b2bbcc17fd995833ca13b55"} Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.294546 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" podStartSLOduration=4.2945237800000005 podStartE2EDuration="4.29452378s" podCreationTimestamp="2025-11-25 15:38:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:38:37.289501418 +0000 UTC m=+1278.343909900" watchObservedRunningTime="2025-11-25 15:38:37.29452378 +0000 UTC m=+1278.348932262" Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.397975 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85959f8897-glhss"] Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.413258 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85959f8897-glhss"] Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.449738 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78665cb57c-77xrd"] Nov 25 15:38:37 crc kubenswrapper[4800]: W1125 15:38:37.560273 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2f9ee92_fddb_49cf_bb5c_de3435545b92.slice/crio-1f2c74349551e4087f959d757c488b95194c30a2bbb7bbd52a5a2421690f0805 WatchSource:0}: Error finding container 1f2c74349551e4087f959d757c488b95194c30a2bbb7bbd52a5a2421690f0805: Status 404 returned error can't find the container with id 1f2c74349551e4087f959d757c488b95194c30a2bbb7bbd52a5a2421690f0805 Nov 25 15:38:37 crc kubenswrapper[4800]: I1125 15:38:37.811150 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34d241a1-0cf3-4b95-933c-4d05d3d98929" path="/var/lib/kubelet/pods/34d241a1-0cf3-4b95-933c-4d05d3d98929/volumes" Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.287276 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78665cb57c-77xrd" event={"ID":"c2f9ee92-fddb-49cf-bb5c-de3435545b92","Type":"ContainerStarted","Data":"1f2c74349551e4087f959d757c488b95194c30a2bbb7bbd52a5a2421690f0805"} Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.777380 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-fxh9k" Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.915551 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-db-sync-config-data\") pod \"861a549f-5373-4d45-befd-3859dbfdc705\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.915610 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzpvf\" (UniqueName: \"kubernetes.io/projected/861a549f-5373-4d45-befd-3859dbfdc705-kube-api-access-kzpvf\") pod \"861a549f-5373-4d45-befd-3859dbfdc705\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.915750 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-config-data\") pod \"861a549f-5373-4d45-befd-3859dbfdc705\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.915794 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-combined-ca-bundle\") pod \"861a549f-5373-4d45-befd-3859dbfdc705\" (UID: \"861a549f-5373-4d45-befd-3859dbfdc705\") " Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.927871 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "861a549f-5373-4d45-befd-3859dbfdc705" (UID: "861a549f-5373-4d45-befd-3859dbfdc705"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.933886 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/861a549f-5373-4d45-befd-3859dbfdc705-kube-api-access-kzpvf" (OuterVolumeSpecName: "kube-api-access-kzpvf") pod "861a549f-5373-4d45-befd-3859dbfdc705" (UID: "861a549f-5373-4d45-befd-3859dbfdc705"). InnerVolumeSpecName "kube-api-access-kzpvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.950899 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "861a549f-5373-4d45-befd-3859dbfdc705" (UID: "861a549f-5373-4d45-befd-3859dbfdc705"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:38 crc kubenswrapper[4800]: I1125 15:38:38.991963 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-config-data" (OuterVolumeSpecName: "config-data") pod "861a549f-5373-4d45-befd-3859dbfdc705" (UID: "861a549f-5373-4d45-befd-3859dbfdc705"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.036188 4800 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.036248 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzpvf\" (UniqueName: \"kubernetes.io/projected/861a549f-5373-4d45-befd-3859dbfdc705-kube-api-access-kzpvf\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.036263 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.036272 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861a549f-5373-4d45-befd-3859dbfdc705-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.323809 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fxh9k" event={"ID":"861a549f-5373-4d45-befd-3859dbfdc705","Type":"ContainerDied","Data":"596b6f80c2daea62bca50f8964ff79865338839f010227c4b3f1727925d14cb7"} Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.323915 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="596b6f80c2daea62bca50f8964ff79865338839f010227c4b3f1727925d14cb7" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.324271 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-fxh9k" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.748125 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-558cd77c67-jcqhf"] Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.751066 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" containerID="cri-o://acd0b4ebc8a0b83be0223d88fc94eec701c5104bbfffc21a72c1d40dde269d66" gracePeriod=10 Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.783365 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f8f5cc67-jq58c"] Nov 25 15:38:39 crc kubenswrapper[4800]: E1125 15:38:39.787465 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="861a549f-5373-4d45-befd-3859dbfdc705" containerName="glance-db-sync" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.787687 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="861a549f-5373-4d45-befd-3859dbfdc705" containerName="glance-db-sync" Nov 25 15:38:39 crc kubenswrapper[4800]: E1125 15:38:39.787789 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34d241a1-0cf3-4b95-933c-4d05d3d98929" containerName="init" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.787875 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="34d241a1-0cf3-4b95-933c-4d05d3d98929" containerName="init" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.788175 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="861a549f-5373-4d45-befd-3859dbfdc705" containerName="glance-db-sync" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.788259 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="34d241a1-0cf3-4b95-933c-4d05d3d98929" containerName="init" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.790202 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.842662 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f8f5cc67-jq58c"] Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.855063 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-dns-svc\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.855122 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-949mb\" (UniqueName: \"kubernetes.io/projected/4fb201a0-5816-4233-a048-40b018b1ad05-kube-api-access-949mb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.855199 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-nb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.855248 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-sb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.855269 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-config\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.957030 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-nb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.957138 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-sb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.957165 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-config\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.957207 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-dns-svc\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.957231 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-949mb\" (UniqueName: \"kubernetes.io/projected/4fb201a0-5816-4233-a048-40b018b1ad05-kube-api-access-949mb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.958697 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-sb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.959555 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-nb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.960736 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-config\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.961199 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-dns-svc\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:39 crc kubenswrapper[4800]: I1125 15:38:39.988204 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-949mb\" (UniqueName: \"kubernetes.io/projected/4fb201a0-5816-4233-a048-40b018b1ad05-kube-api-access-949mb\") pod \"dnsmasq-dns-7f8f5cc67-jq58c\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:40 crc kubenswrapper[4800]: I1125 15:38:40.158785 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:38:40 crc kubenswrapper[4800]: I1125 15:38:40.360192 4800 generic.go:334] "Generic (PLEG): container finished" podID="2278309e-6b47-4375-b433-55a0c80ef751" containerID="acd0b4ebc8a0b83be0223d88fc94eec701c5104bbfffc21a72c1d40dde269d66" exitCode=0 Nov 25 15:38:40 crc kubenswrapper[4800]: I1125 15:38:40.360604 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" event={"ID":"2278309e-6b47-4375-b433-55a0c80ef751","Type":"ContainerDied","Data":"acd0b4ebc8a0b83be0223d88fc94eec701c5104bbfffc21a72c1d40dde269d66"} Nov 25 15:38:41 crc kubenswrapper[4800]: I1125 15:38:41.383434 4800 generic.go:334] "Generic (PLEG): container finished" podID="6d47c54e-ce18-4454-944a-d2cf0df370a8" containerID="4841443f209fa509e5de46b7e1e5425f75fb687ac606a5fc0b1d36c07dab811f" exitCode=0 Nov 25 15:38:41 crc kubenswrapper[4800]: I1125 15:38:41.383485 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n4hqc" event={"ID":"6d47c54e-ce18-4454-944a-d2cf0df370a8","Type":"ContainerDied","Data":"4841443f209fa509e5de46b7e1e5425f75fb687ac606a5fc0b1d36c07dab811f"} Nov 25 15:38:44 crc kubenswrapper[4800]: I1125 15:38:44.581205 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: connect: connection refused" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.274125 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5497646597-g5gld"] Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.314260 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7c78ff894b-2g5wf"] Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.339739 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.341944 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7c78ff894b-2g5wf"] Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.350814 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.453057 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e341748-e3fe-4c2d-933e-fdea97ee66b6-logs\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.453170 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-secret-key\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.453202 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-config-data\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.453250 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw6xd\" (UniqueName: \"kubernetes.io/projected/0e341748-e3fe-4c2d-933e-fdea97ee66b6-kube-api-access-pw6xd\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.453324 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-scripts\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.453385 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-combined-ca-bundle\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.453410 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-tls-certs\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.477512 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78665cb57c-77xrd"] Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.533957 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-56fb8dbc98-w4xzj"] Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.536209 
4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.549641 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56fb8dbc98-w4xzj"] Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.557141 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-combined-ca-bundle\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.557205 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-tls-certs\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.557251 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e341748-e3fe-4c2d-933e-fdea97ee66b6-logs\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.557317 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-secret-key\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.557344 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-config-data\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.557371 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw6xd\" (UniqueName: \"kubernetes.io/projected/0e341748-e3fe-4c2d-933e-fdea97ee66b6-kube-api-access-pw6xd\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.557402 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-scripts\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.558285 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-scripts\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.565064 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-config-data\") pod 
\"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.565478 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e341748-e3fe-4c2d-933e-fdea97ee66b6-logs\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.596731 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-tls-certs\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.597067 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-secret-key\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.597613 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-combined-ca-bundle\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.603922 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw6xd\" (UniqueName: \"kubernetes.io/projected/0e341748-e3fe-4c2d-933e-fdea97ee66b6-kube-api-access-pw6xd\") pod \"horizon-7c78ff894b-2g5wf\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") " pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.659504 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-horizon-tls-certs\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.659565 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-logs\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.659611 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-combined-ca-bundle\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.659641 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-horizon-secret-key\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " 
pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.659673 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz87p\" (UniqueName: \"kubernetes.io/projected/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-kube-api-access-lz87p\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.659704 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-scripts\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.659736 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-config-data\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.680349 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.761904 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-config-data\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.762026 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-horizon-tls-certs\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.762049 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-logs\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.762087 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-combined-ca-bundle\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.762114 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-horizon-secret-key\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.762151 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz87p\" (UniqueName: 
\"kubernetes.io/projected/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-kube-api-access-lz87p\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.762182 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-scripts\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.762984 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-scripts\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.763993 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-config-data\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.765836 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-logs\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.768829 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-horizon-secret-key\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.769573 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-combined-ca-bundle\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.783880 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-horizon-tls-certs\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.802550 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz87p\" (UniqueName: \"kubernetes.io/projected/bc0a04ce-9c18-468e-a9bb-7f8ab46f176d-kube-api-access-lz87p\") pod \"horizon-56fb8dbc98-w4xzj\" (UID: \"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d\") " pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:47 crc kubenswrapper[4800]: I1125 15:38:47.868225 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.362393 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.478413 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-combined-ca-bundle\") pod \"6d47c54e-ce18-4454-944a-d2cf0df370a8\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.478650 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hn726\" (UniqueName: \"kubernetes.io/projected/6d47c54e-ce18-4454-944a-d2cf0df370a8-kube-api-access-hn726\") pod \"6d47c54e-ce18-4454-944a-d2cf0df370a8\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.478722 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-credential-keys\") pod \"6d47c54e-ce18-4454-944a-d2cf0df370a8\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.478948 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-scripts\") pod \"6d47c54e-ce18-4454-944a-d2cf0df370a8\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.479066 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-config-data\") pod \"6d47c54e-ce18-4454-944a-d2cf0df370a8\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.479090 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-fernet-keys\") pod \"6d47c54e-ce18-4454-944a-d2cf0df370a8\" (UID: \"6d47c54e-ce18-4454-944a-d2cf0df370a8\") " Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.486515 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6d47c54e-ce18-4454-944a-d2cf0df370a8" (UID: "6d47c54e-ce18-4454-944a-d2cf0df370a8"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.486537 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6d47c54e-ce18-4454-944a-d2cf0df370a8" (UID: "6d47c54e-ce18-4454-944a-d2cf0df370a8"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.486881 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d47c54e-ce18-4454-944a-d2cf0df370a8-kube-api-access-hn726" (OuterVolumeSpecName: "kube-api-access-hn726") pod "6d47c54e-ce18-4454-944a-d2cf0df370a8" (UID: "6d47c54e-ce18-4454-944a-d2cf0df370a8"). InnerVolumeSpecName "kube-api-access-hn726". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.486971 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-scripts" (OuterVolumeSpecName: "scripts") pod "6d47c54e-ce18-4454-944a-d2cf0df370a8" (UID: "6d47c54e-ce18-4454-944a-d2cf0df370a8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.489810 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n4hqc" event={"ID":"6d47c54e-ce18-4454-944a-d2cf0df370a8","Type":"ContainerDied","Data":"d082809fcd4b3cec8347ce54805f6dc25b3028ffeb10185b0ee0ee25dde82143"} Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.489863 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d082809fcd4b3cec8347ce54805f6dc25b3028ffeb10185b0ee0ee25dde82143" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.489896 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n4hqc" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.516110 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-config-data" (OuterVolumeSpecName: "config-data") pod "6d47c54e-ce18-4454-944a-d2cf0df370a8" (UID: "6d47c54e-ce18-4454-944a-d2cf0df370a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.519087 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d47c54e-ce18-4454-944a-d2cf0df370a8" (UID: "6d47c54e-ce18-4454-944a-d2cf0df370a8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.585434 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hn726\" (UniqueName: \"kubernetes.io/projected/6d47c54e-ce18-4454-944a-d2cf0df370a8-kube-api-access-hn726\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.585489 4800 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.585505 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.585517 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.585529 4800 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:48 crc kubenswrapper[4800]: I1125 15:38:48.585540 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d47c54e-ce18-4454-944a-d2cf0df370a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.455326 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-n4hqc"] Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.467005 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-n4hqc"] Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.562399 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-vvflf"] Nov 25 15:38:49 crc kubenswrapper[4800]: E1125 15:38:49.563001 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d47c54e-ce18-4454-944a-d2cf0df370a8" containerName="keystone-bootstrap" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.563026 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d47c54e-ce18-4454-944a-d2cf0df370a8" containerName="keystone-bootstrap" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.563288 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d47c54e-ce18-4454-944a-d2cf0df370a8" containerName="keystone-bootstrap" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.564089 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.575546 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.575680 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.577774 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.577802 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.577945 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-p5fqm" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.580235 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-vvflf"] Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.707904 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-config-data\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.707984 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-credential-keys\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.708026 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-scripts\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.708066 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-combined-ca-bundle\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.708098 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-fernet-keys\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.708132 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lpfc\" (UniqueName: \"kubernetes.io/projected/7a8a8ce2-c939-4626-b487-022750cd3090-kube-api-access-6lpfc\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.798273 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="6d47c54e-ce18-4454-944a-d2cf0df370a8" path="/var/lib/kubelet/pods/6d47c54e-ce18-4454-944a-d2cf0df370a8/volumes" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.810947 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-credential-keys\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.810998 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-scripts\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.811032 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-combined-ca-bundle\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.811060 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-fernet-keys\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.811104 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lpfc\" (UniqueName: \"kubernetes.io/projected/7a8a8ce2-c939-4626-b487-022750cd3090-kube-api-access-6lpfc\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.811312 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-config-data\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.818183 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-fernet-keys\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.818302 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-combined-ca-bundle\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.828069 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-scripts\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.828596 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-credential-keys\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.828888 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-config-data\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.835643 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lpfc\" (UniqueName: \"kubernetes.io/projected/7a8a8ce2-c939-4626-b487-022750cd3090-kube-api-access-6lpfc\") pod \"keystone-bootstrap-vvflf\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:49 crc kubenswrapper[4800]: I1125 15:38:49.907645 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:38:50 crc kubenswrapper[4800]: E1125 15:38:50.199755 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099" Nov 25 15:38:50 crc kubenswrapper[4800]: E1125 15:38:50.200106 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ff2s2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-7w7c7_openstack(8e726809-c215-4d1a-95a3-d0fadede3cca): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:38:50 crc kubenswrapper[4800]: E1125 15:38:50.203083 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-7w7c7" podUID="8e726809-c215-4d1a-95a3-d0fadede3cca" Nov 25 15:38:50 crc kubenswrapper[4800]: E1125 15:38:50.516961 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099\\\"\"" pod="openstack/placement-db-sync-7w7c7" podUID="8e726809-c215-4d1a-95a3-d0fadede3cca" Nov 25 15:38:50 crc kubenswrapper[4800]: E1125 15:38:50.641704 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140" Nov 25 15:38:50 crc kubenswrapper[4800]: E1125 15:38:50.641939 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n64bhfdh5d5h656h666h58fh6dh69hf8hc4h66dhd4h67dh597h5d7hc8h64dh657h54dh569h546hcfh584hb5h597h554h585h554h5b6h695h69h5b8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dkfsl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(445ab78d-8d8a-4f60-8daa-3fab07b4dfa8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:38:54 crc kubenswrapper[4800]: I1125 15:38:54.576267 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: i/o timeout" Nov 25 15:38:59 crc kubenswrapper[4800]: I1125 15:38:59.576744 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: i/o timeout" Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.689270 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" event={"ID":"2278309e-6b47-4375-b433-55a0c80ef751","Type":"ContainerDied","Data":"99d2400df918ce28162eede650d7e219f8a0bb7608409c87fb601820dff73a17"} Nov 25 15:39:03 crc 
kubenswrapper[4800]: I1125 15:39:03.690149 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99d2400df918ce28162eede650d7e219f8a0bb7608409c87fb601820dff73a17" Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.795467 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.901116 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-dns-svc\") pod \"2278309e-6b47-4375-b433-55a0c80ef751\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.901279 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-sb\") pod \"2278309e-6b47-4375-b433-55a0c80ef751\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.901310 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-config\") pod \"2278309e-6b47-4375-b433-55a0c80ef751\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.901348 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-nb\") pod \"2278309e-6b47-4375-b433-55a0c80ef751\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.901590 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmjd8\" (UniqueName: \"kubernetes.io/projected/2278309e-6b47-4375-b433-55a0c80ef751-kube-api-access-gmjd8\") pod \"2278309e-6b47-4375-b433-55a0c80ef751\" (UID: \"2278309e-6b47-4375-b433-55a0c80ef751\") " Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.908662 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2278309e-6b47-4375-b433-55a0c80ef751-kube-api-access-gmjd8" (OuterVolumeSpecName: "kube-api-access-gmjd8") pod "2278309e-6b47-4375-b433-55a0c80ef751" (UID: "2278309e-6b47-4375-b433-55a0c80ef751"). InnerVolumeSpecName "kube-api-access-gmjd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.965943 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-config" (OuterVolumeSpecName: "config") pod "2278309e-6b47-4375-b433-55a0c80ef751" (UID: "2278309e-6b47-4375-b433-55a0c80ef751"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.967159 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2278309e-6b47-4375-b433-55a0c80ef751" (UID: "2278309e-6b47-4375-b433-55a0c80ef751"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.977534 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2278309e-6b47-4375-b433-55a0c80ef751" (UID: "2278309e-6b47-4375-b433-55a0c80ef751"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:03 crc kubenswrapper[4800]: I1125 15:39:03.996511 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2278309e-6b47-4375-b433-55a0c80ef751" (UID: "2278309e-6b47-4375-b433-55a0c80ef751"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.005091 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmjd8\" (UniqueName: \"kubernetes.io/projected/2278309e-6b47-4375-b433-55a0c80ef751-kube-api-access-gmjd8\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.005130 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.005156 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.005169 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.005178 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2278309e-6b47-4375-b433-55a0c80ef751-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.578047 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: i/o timeout" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.698488 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-558cd77c67-jcqhf" Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.746328 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-558cd77c67-jcqhf"] Nov 25 15:39:04 crc kubenswrapper[4800]: I1125 15:39:04.755731 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-558cd77c67-jcqhf"] Nov 25 15:39:05 crc kubenswrapper[4800]: E1125 15:39:05.031425 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645" Nov 25 15:39:05 crc kubenswrapper[4800]: E1125 15:39:05.031696 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-67jvq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-shvrf_openstack(b030f9b2-f92c-40d4-b92a-7c99d4af8358): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:39:05 crc kubenswrapper[4800]: E1125 15:39:05.032960 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-shvrf" podUID="b030f9b2-f92c-40d4-b92a-7c99d4af8358" Nov 25 15:39:05 crc kubenswrapper[4800]: E1125 15:39:05.713755 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645\\\"\"" pod="openstack/barbican-db-sync-shvrf" podUID="b030f9b2-f92c-40d4-b92a-7c99d4af8358" Nov 25 
15:39:05 crc kubenswrapper[4800]: I1125 15:39:05.800235 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2278309e-6b47-4375-b433-55a0c80ef751" path="/var/lib/kubelet/pods/2278309e-6b47-4375-b433-55a0c80ef751/volumes" Nov 25 15:39:07 crc kubenswrapper[4800]: E1125 15:39:07.146725 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879" Nov 25 15:39:07 crc kubenswrapper[4800]: E1125 15:39:07.147644 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9fjlr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-2d48v_openstack(15830745-aef8-4482-8885-6a5969795af6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 15:39:07 crc kubenswrapper[4800]: E1125 15:39:07.148964 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = 
copying config: context canceled\"" pod="openstack/cinder-db-sync-2d48v" podUID="15830745-aef8-4482-8885-6a5969795af6" Nov 25 15:39:07 crc kubenswrapper[4800]: E1125 15:39:07.733205 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879\\\"\"" pod="openstack/cinder-db-sync-2d48v" podUID="15830745-aef8-4482-8885-6a5969795af6" Nov 25 15:39:07 crc kubenswrapper[4800]: I1125 15:39:07.901070 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f8f5cc67-jq58c"] Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.146676 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56fb8dbc98-w4xzj"] Nov 25 15:39:08 crc kubenswrapper[4800]: W1125 15:39:08.146992 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc0a04ce_9c18_468e_a9bb_7f8ab46f176d.slice/crio-acef2e63c523e51d14bace975d7bae19da78981577055514cf61c678b71bae9e WatchSource:0}: Error finding container acef2e63c523e51d14bace975d7bae19da78981577055514cf61c678b71bae9e: Status 404 returned error can't find the container with id acef2e63c523e51d14bace975d7bae19da78981577055514cf61c678b71bae9e Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.429603 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7c78ff894b-2g5wf"] Nov 25 15:39:08 crc kubenswrapper[4800]: W1125 15:39:08.436537 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e341748_e3fe_4c2d_933e_fdea97ee66b6.slice/crio-4ab4e3c92895bc425cdde8fde98d58a06eb28289a36c3c85a7424de92591d0b4 WatchSource:0}: Error finding container 4ab4e3c92895bc425cdde8fde98d58a06eb28289a36c3c85a7424de92591d0b4: Status 404 returned error can't find the container with id 4ab4e3c92895bc425cdde8fde98d58a06eb28289a36c3c85a7424de92591d0b4 Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.495624 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-vvflf"] Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.762270 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54cdc54fcf-c45jx" event={"ID":"d3674fdb-30d8-402d-b9a7-419574d7a0c9","Type":"ContainerStarted","Data":"8a6c26c71711ce0333d299c0718aef99e8302ab268415a396bdc6386c729739a"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.773024 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vvflf" event={"ID":"7a8a8ce2-c939-4626-b487-022750cd3090","Type":"ContainerStarted","Data":"35a13c468accb749e4e02a580dae2abc551c4bfcf7491c2492d459a306e80e83"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.775520 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78665cb57c-77xrd" event={"ID":"c2f9ee92-fddb-49cf-bb5c-de3435545b92","Type":"ContainerStarted","Data":"f59a6fc77f3bd1f7e2f580a20fdc945aaa84ebb563b1085aa79ac0ad365421bb"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.777718 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-7w7c7" event={"ID":"8e726809-c215-4d1a-95a3-d0fadede3cca","Type":"ContainerStarted","Data":"98ef89cabf311e36ec55a79fd9be06b9bdafd54d8aac03c098e384cf70c713ef"} Nov 25 15:39:08 crc 
kubenswrapper[4800]: I1125 15:39:08.787219 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56fb8dbc98-w4xzj" event={"ID":"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d","Type":"ContainerStarted","Data":"f228ee7bb342f0d7bfca3f32a0c43a34db541de33a26cebd17f73777c768f50e"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.787632 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56fb8dbc98-w4xzj" event={"ID":"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d","Type":"ContainerStarted","Data":"acef2e63c523e51d14bace975d7bae19da78981577055514cf61c678b71bae9e"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.791449 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5497646597-g5gld" event={"ID":"bdf98904-e8fc-4c69-9dc7-5e522c269236","Type":"ContainerStarted","Data":"433241a89db52601539e3d6a79ced5330c09547a33c578b33156142467170def"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.793816 4800 generic.go:334] "Generic (PLEG): container finished" podID="4fb201a0-5816-4233-a048-40b018b1ad05" containerID="7d04d63feaffcf605df56834e3f9144df0aa6f7835f80afa1d7928242eb852ae" exitCode=0 Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.793941 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" event={"ID":"4fb201a0-5816-4233-a048-40b018b1ad05","Type":"ContainerDied","Data":"7d04d63feaffcf605df56834e3f9144df0aa6f7835f80afa1d7928242eb852ae"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.794023 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" event={"ID":"4fb201a0-5816-4233-a048-40b018b1ad05","Type":"ContainerStarted","Data":"a1091d133f4b1dd5f980ef6d4df2e7c6bb8ff304aff1c43bbb819e7d59ff9445"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.799588 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c78ff894b-2g5wf" event={"ID":"0e341748-e3fe-4c2d-933e-fdea97ee66b6","Type":"ContainerStarted","Data":"4ab4e3c92895bc425cdde8fde98d58a06eb28289a36c3c85a7424de92591d0b4"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.801710 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerStarted","Data":"2e92aa247f5af8970ac02e53900aa1757ae3a0803117d919a059b06bac2e6823"} Nov 25 15:39:08 crc kubenswrapper[4800]: I1125 15:39:08.806232 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-7w7c7" podStartSLOduration=3.077283408 podStartE2EDuration="35.806219817s" podCreationTimestamp="2025-11-25 15:38:33 +0000 UTC" firstStartedPulling="2025-11-25 15:38:35.097150615 +0000 UTC m=+1276.151559097" lastFinishedPulling="2025-11-25 15:39:07.826087024 +0000 UTC m=+1308.880495506" observedRunningTime="2025-11-25 15:39:08.80481579 +0000 UTC m=+1309.859224302" watchObservedRunningTime="2025-11-25 15:39:08.806219817 +0000 UTC m=+1309.860628299" Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.832996 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5497646597-g5gld" event={"ID":"bdf98904-e8fc-4c69-9dc7-5e522c269236","Type":"ContainerStarted","Data":"0581e5e372e3185eb1371e6b2331a60f4dd9b0ecbdfa4e8f80a28c89b503aee8"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.833781 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5497646597-g5gld" 
podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon-log" containerID="cri-o://433241a89db52601539e3d6a79ced5330c09547a33c578b33156142467170def" gracePeriod=30 Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.833927 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5497646597-g5gld" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon" containerID="cri-o://0581e5e372e3185eb1371e6b2331a60f4dd9b0ecbdfa4e8f80a28c89b503aee8" gracePeriod=30 Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.841651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78665cb57c-77xrd" event={"ID":"c2f9ee92-fddb-49cf-bb5c-de3435545b92","Type":"ContainerStarted","Data":"f6cfeb997f3cbaeea0d830c43aaa2898fbee56bdd2a9022133548bbd07439c61"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.842171 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78665cb57c-77xrd" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon-log" containerID="cri-o://f59a6fc77f3bd1f7e2f580a20fdc945aaa84ebb563b1085aa79ac0ad365421bb" gracePeriod=30 Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.842240 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78665cb57c-77xrd" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon" containerID="cri-o://f6cfeb997f3cbaeea0d830c43aaa2898fbee56bdd2a9022133548bbd07439c61" gracePeriod=30 Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.845167 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54cdc54fcf-c45jx" event={"ID":"d3674fdb-30d8-402d-b9a7-419574d7a0c9","Type":"ContainerStarted","Data":"e67de3225dbb64bd48217b8d26b37d34399cfe7dd758c0f6f64a9806a8db8ae8"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.845478 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-54cdc54fcf-c45jx" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon-log" containerID="cri-o://8a6c26c71711ce0333d299c0718aef99e8302ab268415a396bdc6386c729739a" gracePeriod=30 Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.845655 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-54cdc54fcf-c45jx" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon" containerID="cri-o://e67de3225dbb64bd48217b8d26b37d34399cfe7dd758c0f6f64a9806a8db8ae8" gracePeriod=30 Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.856004 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" event={"ID":"4fb201a0-5816-4233-a048-40b018b1ad05","Type":"ContainerStarted","Data":"43c886198ada75d90c0025d8df6d40776e9cb1a55970c31b8099d6e867e12085"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.857991 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.878745 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c78ff894b-2g5wf" event={"ID":"0e341748-e3fe-4c2d-933e-fdea97ee66b6","Type":"ContainerStarted","Data":"d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.878809 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c78ff894b-2g5wf" 
event={"ID":"0e341748-e3fe-4c2d-933e-fdea97ee66b6","Type":"ContainerStarted","Data":"9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.881927 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vvflf" event={"ID":"7a8a8ce2-c939-4626-b487-022750cd3090","Type":"ContainerStarted","Data":"cda77c859ed115ed0fac15e1ac8a4a8fbe21eda6d07686adc9effa36f72d5781"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.890078 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56fb8dbc98-w4xzj" event={"ID":"bc0a04ce-9c18-468e-a9bb-7f8ab46f176d","Type":"ContainerStarted","Data":"4a2290f7376aaad62401bb375b4f0abcdd99827198ec074ff0fe5acc581ab7ee"} Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.937051 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-78665cb57c-77xrd" podStartSLOduration=3.678219983 podStartE2EDuration="33.937023207s" podCreationTimestamp="2025-11-25 15:38:36 +0000 UTC" firstStartedPulling="2025-11-25 15:38:37.565216734 +0000 UTC m=+1278.619625216" lastFinishedPulling="2025-11-25 15:39:07.824019958 +0000 UTC m=+1308.878428440" observedRunningTime="2025-11-25 15:39:09.92829459 +0000 UTC m=+1310.982703072" watchObservedRunningTime="2025-11-25 15:39:09.937023207 +0000 UTC m=+1310.991431689" Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.938684 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5497646597-g5gld" podStartSLOduration=3.950281338 podStartE2EDuration="35.938678382s" podCreationTimestamp="2025-11-25 15:38:34 +0000 UTC" firstStartedPulling="2025-11-25 15:38:35.378510521 +0000 UTC m=+1276.432919003" lastFinishedPulling="2025-11-25 15:39:07.366907555 +0000 UTC m=+1308.421316047" observedRunningTime="2025-11-25 15:39:09.905263566 +0000 UTC m=+1310.959672038" watchObservedRunningTime="2025-11-25 15:39:09.938678382 +0000 UTC m=+1310.993086864" Nov 25 15:39:09 crc kubenswrapper[4800]: I1125 15:39:09.983175 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-54cdc54fcf-c45jx" podStartSLOduration=4.302570925 podStartE2EDuration="36.983139626s" podCreationTimestamp="2025-11-25 15:38:33 +0000 UTC" firstStartedPulling="2025-11-25 15:38:35.093677427 +0000 UTC m=+1276.148085909" lastFinishedPulling="2025-11-25 15:39:07.774246128 +0000 UTC m=+1308.828654610" observedRunningTime="2025-11-25 15:39:09.971870381 +0000 UTC m=+1311.026278863" watchObservedRunningTime="2025-11-25 15:39:09.983139626 +0000 UTC m=+1311.037548108" Nov 25 15:39:10 crc kubenswrapper[4800]: I1125 15:39:10.063378 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" podStartSLOduration=31.063339221 podStartE2EDuration="31.063339221s" podCreationTimestamp="2025-11-25 15:38:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:09.999569802 +0000 UTC m=+1311.053978284" watchObservedRunningTime="2025-11-25 15:39:10.063339221 +0000 UTC m=+1311.117747703" Nov 25 15:39:10 crc kubenswrapper[4800]: I1125 15:39:10.079926 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7c78ff894b-2g5wf" podStartSLOduration=23.07989735 podStartE2EDuration="23.07989735s" podCreationTimestamp="2025-11-25 15:38:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:10.037360907 +0000 UTC m=+1311.091769389" watchObservedRunningTime="2025-11-25 15:39:10.07989735 +0000 UTC m=+1311.134305832" Nov 25 15:39:10 crc kubenswrapper[4800]: I1125 15:39:10.117205 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-vvflf" podStartSLOduration=21.117174091 podStartE2EDuration="21.117174091s" podCreationTimestamp="2025-11-25 15:38:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:10.057057721 +0000 UTC m=+1311.111466203" watchObservedRunningTime="2025-11-25 15:39:10.117174091 +0000 UTC m=+1311.171582573" Nov 25 15:39:10 crc kubenswrapper[4800]: I1125 15:39:10.165128 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-56fb8dbc98-w4xzj" podStartSLOduration=23.16508936 podStartE2EDuration="23.16508936s" podCreationTimestamp="2025-11-25 15:38:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:10.082280645 +0000 UTC m=+1311.136689127" watchObservedRunningTime="2025-11-25 15:39:10.16508936 +0000 UTC m=+1311.219497842" Nov 25 15:39:14 crc kubenswrapper[4800]: I1125 15:39:14.080605 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:39:14 crc kubenswrapper[4800]: I1125 15:39:14.596870 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5497646597-g5gld" Nov 25 15:39:15 crc kubenswrapper[4800]: I1125 15:39:15.161179 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:39:15 crc kubenswrapper[4800]: I1125 15:39:15.222406 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-5rdjt"] Nov 25 15:39:15 crc kubenswrapper[4800]: I1125 15:39:15.222737 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerName="dnsmasq-dns" containerID="cri-o://3198b91d6c6eb6bcef3240f08a53bfda03ec14319b9627db88bc4570be9b2ad6" gracePeriod=10 Nov 25 15:39:15 crc kubenswrapper[4800]: I1125 15:39:15.972935 4800 generic.go:334] "Generic (PLEG): container finished" podID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerID="3198b91d6c6eb6bcef3240f08a53bfda03ec14319b9627db88bc4570be9b2ad6" exitCode=0 Nov 25 15:39:15 crc kubenswrapper[4800]: I1125 15:39:15.972984 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" event={"ID":"84bf7ad1-699f-4ba0-a3ce-75e46a590646","Type":"ContainerDied","Data":"3198b91d6c6eb6bcef3240f08a53bfda03ec14319b9627db88bc4570be9b2ad6"} Nov 25 15:39:16 crc kubenswrapper[4800]: I1125 15:39:16.912877 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:39:16 crc kubenswrapper[4800]: I1125 15:39:16.984188 4800 generic.go:334] "Generic (PLEG): container finished" podID="7a8a8ce2-c939-4626-b487-022750cd3090" containerID="cda77c859ed115ed0fac15e1ac8a4a8fbe21eda6d07686adc9effa36f72d5781" exitCode=0 Nov 25 15:39:16 crc kubenswrapper[4800]: I1125 15:39:16.984361 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-vvflf" event={"ID":"7a8a8ce2-c939-4626-b487-022750cd3090","Type":"ContainerDied","Data":"cda77c859ed115ed0fac15e1ac8a4a8fbe21eda6d07686adc9effa36f72d5781"} Nov 25 15:39:17 crc kubenswrapper[4800]: I1125 15:39:17.681851 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:39:17 crc kubenswrapper[4800]: I1125 15:39:17.682264 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:39:17 crc kubenswrapper[4800]: I1125 15:39:17.880339 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:39:17 crc kubenswrapper[4800]: I1125 15:39:17.881296 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:39:17 crc kubenswrapper[4800]: I1125 15:39:17.926545 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.109:5353: connect: connection refused" Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.026908 4800 generic.go:334] "Generic (PLEG): container finished" podID="8e726809-c215-4d1a-95a3-d0fadede3cca" containerID="98ef89cabf311e36ec55a79fd9be06b9bdafd54d8aac03c098e384cf70c713ef" exitCode=0 Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.028071 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-7w7c7" event={"ID":"8e726809-c215-4d1a-95a3-d0fadede3cca","Type":"ContainerDied","Data":"98ef89cabf311e36ec55a79fd9be06b9bdafd54d8aac03c098e384cf70c713ef"} Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.350184 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.473428 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6kbd\" (UniqueName: \"kubernetes.io/projected/84bf7ad1-699f-4ba0-a3ce-75e46a590646-kube-api-access-q6kbd\") pod \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.473488 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-sb\") pod \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.473639 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-nb\") pod \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.473660 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-dns-svc\") pod \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.473736 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-config\") pod \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\" (UID: \"84bf7ad1-699f-4ba0-a3ce-75e46a590646\") " Nov 25 15:39:18 crc kubenswrapper[4800]: I1125 15:39:18.494121 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84bf7ad1-699f-4ba0-a3ce-75e46a590646-kube-api-access-q6kbd" (OuterVolumeSpecName: "kube-api-access-q6kbd") pod "84bf7ad1-699f-4ba0-a3ce-75e46a590646" (UID: "84bf7ad1-699f-4ba0-a3ce-75e46a590646"). InnerVolumeSpecName "kube-api-access-q6kbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.580272 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6kbd\" (UniqueName: \"kubernetes.io/projected/84bf7ad1-699f-4ba0-a3ce-75e46a590646-kube-api-access-q6kbd\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.640186 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.645629 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "84bf7ad1-699f-4ba0-a3ce-75e46a590646" (UID: "84bf7ad1-699f-4ba0-a3ce-75e46a590646"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.648026 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-config" (OuterVolumeSpecName: "config") pod "84bf7ad1-699f-4ba0-a3ce-75e46a590646" (UID: "84bf7ad1-699f-4ba0-a3ce-75e46a590646"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.654690 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "84bf7ad1-699f-4ba0-a3ce-75e46a590646" (UID: "84bf7ad1-699f-4ba0-a3ce-75e46a590646"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.655445 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "84bf7ad1-699f-4ba0-a3ce-75e46a590646" (UID: "84bf7ad1-699f-4ba0-a3ce-75e46a590646"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.687913 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.687964 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.687978 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.687994 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84bf7ad1-699f-4ba0-a3ce-75e46a590646-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.789281 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-credential-keys\") pod \"7a8a8ce2-c939-4626-b487-022750cd3090\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.789362 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-combined-ca-bundle\") pod \"7a8a8ce2-c939-4626-b487-022750cd3090\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.789406 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-config-data\") pod \"7a8a8ce2-c939-4626-b487-022750cd3090\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.789503 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-scripts\") pod \"7a8a8ce2-c939-4626-b487-022750cd3090\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.789544 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-6lpfc\" (UniqueName: \"kubernetes.io/projected/7a8a8ce2-c939-4626-b487-022750cd3090-kube-api-access-6lpfc\") pod \"7a8a8ce2-c939-4626-b487-022750cd3090\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.789607 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-fernet-keys\") pod \"7a8a8ce2-c939-4626-b487-022750cd3090\" (UID: \"7a8a8ce2-c939-4626-b487-022750cd3090\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.794803 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-scripts" (OuterVolumeSpecName: "scripts") pod "7a8a8ce2-c939-4626-b487-022750cd3090" (UID: "7a8a8ce2-c939-4626-b487-022750cd3090"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.794887 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8a8ce2-c939-4626-b487-022750cd3090-kube-api-access-6lpfc" (OuterVolumeSpecName: "kube-api-access-6lpfc") pod "7a8a8ce2-c939-4626-b487-022750cd3090" (UID: "7a8a8ce2-c939-4626-b487-022750cd3090"). InnerVolumeSpecName "kube-api-access-6lpfc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.795142 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7a8a8ce2-c939-4626-b487-022750cd3090" (UID: "7a8a8ce2-c939-4626-b487-022750cd3090"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.795161 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7a8a8ce2-c939-4626-b487-022750cd3090" (UID: "7a8a8ce2-c939-4626-b487-022750cd3090"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.822939 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a8a8ce2-c939-4626-b487-022750cd3090" (UID: "7a8a8ce2-c939-4626-b487-022750cd3090"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.833918 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-config-data" (OuterVolumeSpecName: "config-data") pod "7a8a8ce2-c939-4626-b487-022750cd3090" (UID: "7a8a8ce2-c939-4626-b487-022750cd3090"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.893802 4800 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.893833 4800 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.893867 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.893876 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.893884 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8a8ce2-c939-4626-b487-022750cd3090-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:18.893892 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lpfc\" (UniqueName: \"kubernetes.io/projected/7a8a8ce2-c939-4626-b487-022750cd3090-kube-api-access-6lpfc\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.037146 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" event={"ID":"84bf7ad1-699f-4ba0-a3ce-75e46a590646","Type":"ContainerDied","Data":"3a8e7fcdf30c45b29e78997c185b555c939e658e33b86460e101b82b1c489fbe"} Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.037202 4800 scope.go:117] "RemoveContainer" containerID="3198b91d6c6eb6bcef3240f08a53bfda03ec14319b9627db88bc4570be9b2ad6" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.037346 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-5rdjt" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.048594 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerStarted","Data":"ffdc5ebf574e50e11ff0566e05670dcabd2d824fd1a572875a75f61fe94a806f"} Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.065225 4800 scope.go:117] "RemoveContainer" containerID="77be9ba1326915a0807b0ff1ceef231623abab09e9a0c5db006a92c2a0c2308e" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.066018 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-vvflf" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.066071 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vvflf" event={"ID":"7a8a8ce2-c939-4626-b487-022750cd3090","Type":"ContainerDied","Data":"35a13c468accb749e4e02a580dae2abc551c4bfcf7491c2492d459a306e80e83"} Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.066151 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35a13c468accb749e4e02a580dae2abc551c4bfcf7491c2492d459a306e80e83" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.153912 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-5rdjt"] Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.178171 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-5rdjt"] Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.189894 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7dfbf776bb-kgx2k"] Nov 25 15:39:19 crc kubenswrapper[4800]: E1125 15:39:19.190369 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerName="dnsmasq-dns" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190384 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerName="dnsmasq-dns" Nov 25 15:39:19 crc kubenswrapper[4800]: E1125 15:39:19.190399 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="init" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190406 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="init" Nov 25 15:39:19 crc kubenswrapper[4800]: E1125 15:39:19.190436 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190442 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" Nov 25 15:39:19 crc kubenswrapper[4800]: E1125 15:39:19.190459 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8a8ce2-c939-4626-b487-022750cd3090" containerName="keystone-bootstrap" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190465 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8a8ce2-c939-4626-b487-022750cd3090" containerName="keystone-bootstrap" Nov 25 15:39:19 crc kubenswrapper[4800]: E1125 15:39:19.190479 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerName="init" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190485 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerName="init" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190697 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" containerName="dnsmasq-dns" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190722 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="2278309e-6b47-4375-b433-55a0c80ef751" containerName="dnsmasq-dns" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.190733 4800 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7a8a8ce2-c939-4626-b487-022750cd3090" containerName="keystone-bootstrap" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.191436 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.197093 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.197282 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.197447 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.197568 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.198044 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.198191 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-p5fqm" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.203436 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7dfbf776bb-kgx2k"] Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306501 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-combined-ca-bundle\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306553 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7s772\" (UniqueName: \"kubernetes.io/projected/52ce962a-182b-48b4-96ee-225161f70f29-kube-api-access-7s772\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306602 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-config-data\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306636 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-credential-keys\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306688 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-scripts\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306728 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-internal-tls-certs\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306753 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-fernet-keys\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.306791 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-public-tls-certs\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409321 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-public-tls-certs\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409392 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-combined-ca-bundle\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409427 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7s772\" (UniqueName: \"kubernetes.io/projected/52ce962a-182b-48b4-96ee-225161f70f29-kube-api-access-7s772\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409468 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-config-data\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409517 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-credential-keys\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409576 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-scripts\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409635 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-internal-tls-certs\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.409673 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-fernet-keys\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.416775 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-config-data\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.430231 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-scripts\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.430951 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-public-tls-certs\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.433226 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-credential-keys\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.433728 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-internal-tls-certs\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.434491 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-fernet-keys\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.446369 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7s772\" (UniqueName: \"kubernetes.io/projected/52ce962a-182b-48b4-96ee-225161f70f29-kube-api-access-7s772\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: \"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.459349 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ce962a-182b-48b4-96ee-225161f70f29-combined-ca-bundle\") pod \"keystone-7dfbf776bb-kgx2k\" (UID: 
\"52ce962a-182b-48b4-96ee-225161f70f29\") " pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.559827 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.568159 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-7w7c7" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.724348 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e726809-c215-4d1a-95a3-d0fadede3cca-logs\") pod \"8e726809-c215-4d1a-95a3-d0fadede3cca\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.724933 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e726809-c215-4d1a-95a3-d0fadede3cca-logs" (OuterVolumeSpecName: "logs") pod "8e726809-c215-4d1a-95a3-d0fadede3cca" (UID: "8e726809-c215-4d1a-95a3-d0fadede3cca"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.725051 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-scripts\") pod \"8e726809-c215-4d1a-95a3-d0fadede3cca\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.725129 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-config-data\") pod \"8e726809-c215-4d1a-95a3-d0fadede3cca\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.725193 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-combined-ca-bundle\") pod \"8e726809-c215-4d1a-95a3-d0fadede3cca\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.726246 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ff2s2\" (UniqueName: \"kubernetes.io/projected/8e726809-c215-4d1a-95a3-d0fadede3cca-kube-api-access-ff2s2\") pod \"8e726809-c215-4d1a-95a3-d0fadede3cca\" (UID: \"8e726809-c215-4d1a-95a3-d0fadede3cca\") " Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.728486 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e726809-c215-4d1a-95a3-d0fadede3cca-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.732050 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-scripts" (OuterVolumeSpecName: "scripts") pod "8e726809-c215-4d1a-95a3-d0fadede3cca" (UID: "8e726809-c215-4d1a-95a3-d0fadede3cca"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.734008 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e726809-c215-4d1a-95a3-d0fadede3cca-kube-api-access-ff2s2" (OuterVolumeSpecName: "kube-api-access-ff2s2") pod "8e726809-c215-4d1a-95a3-d0fadede3cca" (UID: "8e726809-c215-4d1a-95a3-d0fadede3cca"). InnerVolumeSpecName "kube-api-access-ff2s2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.778039 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-config-data" (OuterVolumeSpecName: "config-data") pod "8e726809-c215-4d1a-95a3-d0fadede3cca" (UID: "8e726809-c215-4d1a-95a3-d0fadede3cca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.805625 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e726809-c215-4d1a-95a3-d0fadede3cca" (UID: "8e726809-c215-4d1a-95a3-d0fadede3cca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.817028 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84bf7ad1-699f-4ba0-a3ce-75e46a590646" path="/var/lib/kubelet/pods/84bf7ad1-699f-4ba0-a3ce-75e46a590646/volumes" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.831127 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ff2s2\" (UniqueName: \"kubernetes.io/projected/8e726809-c215-4d1a-95a3-d0fadede3cca-kube-api-access-ff2s2\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.831179 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.831193 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:19 crc kubenswrapper[4800]: I1125 15:39:19.831204 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e726809-c215-4d1a-95a3-d0fadede3cca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.080917 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-7w7c7" event={"ID":"8e726809-c215-4d1a-95a3-d0fadede3cca","Type":"ContainerDied","Data":"126e343945a386573f7a985814f180bc379c483865c325a1d87fbca3fd91cda2"} Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.081291 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="126e343945a386573f7a985814f180bc379c483865c325a1d87fbca3fd91cda2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.081356 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-7w7c7" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.142534 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7dfbf776bb-kgx2k"] Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.259245 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6c9d4bc54d-drmz2"] Nov 25 15:39:20 crc kubenswrapper[4800]: E1125 15:39:20.259755 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e726809-c215-4d1a-95a3-d0fadede3cca" containerName="placement-db-sync" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.259770 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e726809-c215-4d1a-95a3-d0fadede3cca" containerName="placement-db-sync" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.259974 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e726809-c215-4d1a-95a3-d0fadede3cca" containerName="placement-db-sync" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.261062 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.266251 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.266609 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.268912 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.273576 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-xnnhl" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.275463 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6c9d4bc54d-drmz2"] Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.275639 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.460891 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-internal-tls-certs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.460956 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-config-data\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.461030 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec5af7dd-2606-4607-b136-51a82b3e4ad8-logs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.461064 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-47qkz\" (UniqueName: \"kubernetes.io/projected/ec5af7dd-2606-4607-b136-51a82b3e4ad8-kube-api-access-47qkz\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.461118 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-public-tls-certs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.461342 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-scripts\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.461365 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-combined-ca-bundle\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.564762 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-public-tls-certs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.564832 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-scripts\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.564884 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-combined-ca-bundle\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.564929 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-internal-tls-certs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.564973 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-config-data\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.565071 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ec5af7dd-2606-4607-b136-51a82b3e4ad8-logs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.565110 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47qkz\" (UniqueName: \"kubernetes.io/projected/ec5af7dd-2606-4607-b136-51a82b3e4ad8-kube-api-access-47qkz\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.571308 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec5af7dd-2606-4607-b136-51a82b3e4ad8-logs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.573660 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-internal-tls-certs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.574570 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-scripts\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.574833 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-config-data\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.585748 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-combined-ca-bundle\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.588272 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec5af7dd-2606-4607-b136-51a82b3e4ad8-public-tls-certs\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.592781 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47qkz\" (UniqueName: \"kubernetes.io/projected/ec5af7dd-2606-4607-b136-51a82b3e4ad8-kube-api-access-47qkz\") pod \"placement-6c9d4bc54d-drmz2\" (UID: \"ec5af7dd-2606-4607-b136-51a82b3e4ad8\") " pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:20 crc kubenswrapper[4800]: I1125 15:39:20.632999 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:21 crc kubenswrapper[4800]: I1125 15:39:21.098794 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-shvrf" event={"ID":"b030f9b2-f92c-40d4-b92a-7c99d4af8358","Type":"ContainerStarted","Data":"ba816d6eaa1f597a866744f7884185d6679bacca65eab4bd5ab973f10cb49f2e"} Nov 25 15:39:21 crc kubenswrapper[4800]: I1125 15:39:21.104194 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7dfbf776bb-kgx2k" event={"ID":"52ce962a-182b-48b4-96ee-225161f70f29","Type":"ContainerStarted","Data":"cbd0ba941eea9dc97098593a70b586c3d8692ce83c306a32c573c5c0ed2d0100"} Nov 25 15:39:21 crc kubenswrapper[4800]: I1125 15:39:21.104245 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7dfbf776bb-kgx2k" event={"ID":"52ce962a-182b-48b4-96ee-225161f70f29","Type":"ContainerStarted","Data":"f5b99fa34e10b29746a9416ea31f0dc7ad38282694a790e44da7ace5492998bd"} Nov 25 15:39:21 crc kubenswrapper[4800]: I1125 15:39:21.104417 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:21 crc kubenswrapper[4800]: I1125 15:39:21.159996 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-shvrf" podStartSLOduration=2.897111191 podStartE2EDuration="48.159971169s" podCreationTimestamp="2025-11-25 15:38:33 +0000 UTC" firstStartedPulling="2025-11-25 15:38:35.133658713 +0000 UTC m=+1276.188067195" lastFinishedPulling="2025-11-25 15:39:20.396518691 +0000 UTC m=+1321.450927173" observedRunningTime="2025-11-25 15:39:21.135431404 +0000 UTC m=+1322.189839916" watchObservedRunningTime="2025-11-25 15:39:21.159971169 +0000 UTC m=+1322.214379651" Nov 25 15:39:21 crc kubenswrapper[4800]: I1125 15:39:21.171908 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7dfbf776bb-kgx2k" podStartSLOduration=2.171876172 podStartE2EDuration="2.171876172s" podCreationTimestamp="2025-11-25 15:39:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:21.164753089 +0000 UTC m=+1322.219161571" watchObservedRunningTime="2025-11-25 15:39:21.171876172 +0000 UTC m=+1322.226284664" Nov 25 15:39:21 crc kubenswrapper[4800]: I1125 15:39:21.319969 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6c9d4bc54d-drmz2"] Nov 25 15:39:22 crc kubenswrapper[4800]: I1125 15:39:22.152661 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c9d4bc54d-drmz2" event={"ID":"ec5af7dd-2606-4607-b136-51a82b3e4ad8","Type":"ContainerStarted","Data":"328b09c8368b6eca43af03423b2a2a8bb6bbe5269ec034dc85c1541e80c8fb64"} Nov 25 15:39:22 crc kubenswrapper[4800]: I1125 15:39:22.152719 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c9d4bc54d-drmz2" event={"ID":"ec5af7dd-2606-4607-b136-51a82b3e4ad8","Type":"ContainerStarted","Data":"a5c1c8c38478cb898ddddc67796f66ca8da5a9f2225f4781bdfee2662e114fb1"} Nov 25 15:39:23 crc kubenswrapper[4800]: I1125 15:39:23.162266 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c9d4bc54d-drmz2" event={"ID":"ec5af7dd-2606-4607-b136-51a82b3e4ad8","Type":"ContainerStarted","Data":"6f3737f91252d4b7bbd5cf4077aaaf6b33de7c99b384f5ca4131380dba565956"} Nov 25 15:39:23 crc kubenswrapper[4800]: I1125 15:39:23.163129 4800 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:23 crc kubenswrapper[4800]: I1125 15:39:23.200447 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6c9d4bc54d-drmz2" podStartSLOduration=3.200423362 podStartE2EDuration="3.200423362s" podCreationTimestamp="2025-11-25 15:39:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:23.189939867 +0000 UTC m=+1324.244348359" watchObservedRunningTime="2025-11-25 15:39:23.200423362 +0000 UTC m=+1324.254831844" Nov 25 15:39:24 crc kubenswrapper[4800]: I1125 15:39:24.208135 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2d48v" event={"ID":"15830745-aef8-4482-8885-6a5969795af6","Type":"ContainerStarted","Data":"5f3411508cfef89239b8cab3a83cb6b80b08a61992feda649c1a6193b7f1b7ad"} Nov 25 15:39:24 crc kubenswrapper[4800]: I1125 15:39:24.208361 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6c9d4bc54d-drmz2" Nov 25 15:39:24 crc kubenswrapper[4800]: I1125 15:39:24.236726 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-2d48v" podStartSLOduration=3.152727892 podStartE2EDuration="51.236700478s" podCreationTimestamp="2025-11-25 15:38:33 +0000 UTC" firstStartedPulling="2025-11-25 15:38:35.180696848 +0000 UTC m=+1276.235105330" lastFinishedPulling="2025-11-25 15:39:23.264669434 +0000 UTC m=+1324.319077916" observedRunningTime="2025-11-25 15:39:24.228122964 +0000 UTC m=+1325.282531446" watchObservedRunningTime="2025-11-25 15:39:24.236700478 +0000 UTC m=+1325.291108960" Nov 25 15:39:25 crc kubenswrapper[4800]: I1125 15:39:25.222194 4800 generic.go:334] "Generic (PLEG): container finished" podID="28782c8c-88d7-48d6-bd10-3b64cff49706" containerID="0eefb7b7fc795dec6cd9666544ce5a4b72c5db4487024f3254aa19e1f2bcbfb8" exitCode=0 Nov 25 15:39:25 crc kubenswrapper[4800]: I1125 15:39:25.222416 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rq7kb" event={"ID":"28782c8c-88d7-48d6-bd10-3b64cff49706","Type":"ContainerDied","Data":"0eefb7b7fc795dec6cd9666544ce5a4b72c5db4487024f3254aa19e1f2bcbfb8"} Nov 25 15:39:25 crc kubenswrapper[4800]: I1125 15:39:25.229775 4800 generic.go:334] "Generic (PLEG): container finished" podID="b030f9b2-f92c-40d4-b92a-7c99d4af8358" containerID="ba816d6eaa1f597a866744f7884185d6679bacca65eab4bd5ab973f10cb49f2e" exitCode=0 Nov 25 15:39:25 crc kubenswrapper[4800]: I1125 15:39:25.229944 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-shvrf" event={"ID":"b030f9b2-f92c-40d4-b92a-7c99d4af8358","Type":"ContainerDied","Data":"ba816d6eaa1f597a866744f7884185d6679bacca65eab4bd5ab973f10cb49f2e"} Nov 25 15:39:27 crc kubenswrapper[4800]: I1125 15:39:27.684549 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7c78ff894b-2g5wf" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.138:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.138:8443: connect: connection refused" Nov 25 15:39:27 crc kubenswrapper[4800]: I1125 15:39:27.871627 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-56fb8dbc98-w4xzj" podUID="bc0a04ce-9c18-468e-a9bb-7f8ab46f176d" containerName="horizon" probeResult="failure" output="Get 
\"https://10.217.0.139:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.139:8443: connect: connection refused" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.038880 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-shvrf" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.046342 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.088295 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-combined-ca-bundle\") pod \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.088356 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-db-sync-config-data\") pod \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.088448 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-config\") pod \"28782c8c-88d7-48d6-bd10-3b64cff49706\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.088473 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm78s\" (UniqueName: \"kubernetes.io/projected/28782c8c-88d7-48d6-bd10-3b64cff49706-kube-api-access-hm78s\") pod \"28782c8c-88d7-48d6-bd10-3b64cff49706\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.088553 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67jvq\" (UniqueName: \"kubernetes.io/projected/b030f9b2-f92c-40d4-b92a-7c99d4af8358-kube-api-access-67jvq\") pod \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\" (UID: \"b030f9b2-f92c-40d4-b92a-7c99d4af8358\") " Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.088660 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-combined-ca-bundle\") pod \"28782c8c-88d7-48d6-bd10-3b64cff49706\" (UID: \"28782c8c-88d7-48d6-bd10-3b64cff49706\") " Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.113745 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28782c8c-88d7-48d6-bd10-3b64cff49706-kube-api-access-hm78s" (OuterVolumeSpecName: "kube-api-access-hm78s") pod "28782c8c-88d7-48d6-bd10-3b64cff49706" (UID: "28782c8c-88d7-48d6-bd10-3b64cff49706"). InnerVolumeSpecName "kube-api-access-hm78s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.117249 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b030f9b2-f92c-40d4-b92a-7c99d4af8358-kube-api-access-67jvq" (OuterVolumeSpecName: "kube-api-access-67jvq") pod "b030f9b2-f92c-40d4-b92a-7c99d4af8358" (UID: "b030f9b2-f92c-40d4-b92a-7c99d4af8358"). InnerVolumeSpecName "kube-api-access-67jvq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.122799 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b030f9b2-f92c-40d4-b92a-7c99d4af8358" (UID: "b030f9b2-f92c-40d4-b92a-7c99d4af8358"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.127388 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28782c8c-88d7-48d6-bd10-3b64cff49706" (UID: "28782c8c-88d7-48d6-bd10-3b64cff49706"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.132323 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-config" (OuterVolumeSpecName: "config") pod "28782c8c-88d7-48d6-bd10-3b64cff49706" (UID: "28782c8c-88d7-48d6-bd10-3b64cff49706"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.152994 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b030f9b2-f92c-40d4-b92a-7c99d4af8358" (UID: "b030f9b2-f92c-40d4-b92a-7c99d4af8358"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.191729 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.191771 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.191783 4800 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b030f9b2-f92c-40d4-b92a-7c99d4af8358-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.191794 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/28782c8c-88d7-48d6-bd10-3b64cff49706-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.191804 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm78s\" (UniqueName: \"kubernetes.io/projected/28782c8c-88d7-48d6-bd10-3b64cff49706-kube-api-access-hm78s\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.191817 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67jvq\" (UniqueName: \"kubernetes.io/projected/b030f9b2-f92c-40d4-b92a-7c99d4af8358-kube-api-access-67jvq\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.304855 4800 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/neutron-db-sync-rq7kb" event={"ID":"28782c8c-88d7-48d6-bd10-3b64cff49706","Type":"ContainerDied","Data":"8127188c31016c69be415f2f79c24a032c3652050abfe5a147183ed5bc468fb4"} Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.304920 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8127188c31016c69be415f2f79c24a032c3652050abfe5a147183ed5bc468fb4" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.304916 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rq7kb" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.309768 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-shvrf" event={"ID":"b030f9b2-f92c-40d4-b92a-7c99d4af8358","Type":"ContainerDied","Data":"e345380c05e09fd867477710bdb4127ff67f1234eb92d68a38b966b7c9943b0d"} Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.309795 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e345380c05e09fd867477710bdb4127ff67f1234eb92d68a38b966b7c9943b0d" Nov 25 15:39:30 crc kubenswrapper[4800]: I1125 15:39:30.309874 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-shvrf" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.292300 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7664b8b7d5-cx4ps"] Nov 25 15:39:31 crc kubenswrapper[4800]: E1125 15:39:31.293055 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b030f9b2-f92c-40d4-b92a-7c99d4af8358" containerName="barbican-db-sync" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.293070 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b030f9b2-f92c-40d4-b92a-7c99d4af8358" containerName="barbican-db-sync" Nov 25 15:39:31 crc kubenswrapper[4800]: E1125 15:39:31.293081 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28782c8c-88d7-48d6-bd10-3b64cff49706" containerName="neutron-db-sync" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.293087 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="28782c8c-88d7-48d6-bd10-3b64cff49706" containerName="neutron-db-sync" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.293276 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="28782c8c-88d7-48d6-bd10-3b64cff49706" containerName="neutron-db-sync" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.293304 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b030f9b2-f92c-40d4-b92a-7c99d4af8358" containerName="barbican-db-sync" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.294353 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.306734 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5d486dc894-hwkxc"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.307751 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-jcx75" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.308292 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.308730 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.308977 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318348 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318635 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-config-data-custom\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318684 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-config-data\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318708 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmtxx\" (UniqueName: \"kubernetes.io/projected/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-kube-api-access-kmtxx\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318731 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-combined-ca-bundle\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318770 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-logs\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318815 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-combined-ca-bundle\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.318834 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzsqz\" (UniqueName: \"kubernetes.io/projected/a975dd2f-273e-4d84-8a2b-96badfae1fdb-kube-api-access-wzsqz\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.319021 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a975dd2f-273e-4d84-8a2b-96badfae1fdb-logs\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.319058 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-config-data-custom\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.319087 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-config-data\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.349049 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7664b8b7d5-cx4ps"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.352589 4800 generic.go:334] "Generic (PLEG): container finished" podID="15830745-aef8-4482-8885-6a5969795af6" containerID="5f3411508cfef89239b8cab3a83cb6b80b08a61992feda649c1a6193b7f1b7ad" exitCode=0 Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.352686 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2d48v" event={"ID":"15830745-aef8-4482-8885-6a5969795af6","Type":"ContainerDied","Data":"5f3411508cfef89239b8cab3a83cb6b80b08a61992feda649c1a6193b7f1b7ad"} Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.370788 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5d486dc894-hwkxc"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.423646 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-config-data-custom\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.423755 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-config-data\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: 
\"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431353 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmtxx\" (UniqueName: \"kubernetes.io/projected/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-kube-api-access-kmtxx\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431422 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-combined-ca-bundle\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431530 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-logs\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431697 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-combined-ca-bundle\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431731 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzsqz\" (UniqueName: \"kubernetes.io/projected/a975dd2f-273e-4d84-8a2b-96badfae1fdb-kube-api-access-wzsqz\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431860 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a975dd2f-273e-4d84-8a2b-96badfae1fdb-logs\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431941 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-config-data-custom\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.431994 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-config-data\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.440159 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-config-data\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.440440 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-config-data-custom\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.441544 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-logs\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.441901 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a975dd2f-273e-4d84-8a2b-96badfae1fdb-logs\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.449942 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-combined-ca-bundle\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.452364 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-config-data-custom\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.460427 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-config-data\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.469595 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzsqz\" (UniqueName: \"kubernetes.io/projected/a975dd2f-273e-4d84-8a2b-96badfae1fdb-kube-api-access-wzsqz\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.473303 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmtxx\" (UniqueName: \"kubernetes.io/projected/c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8-kube-api-access-kmtxx\") pod \"barbican-keystone-listener-5d486dc894-hwkxc\" (UID: \"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8\") " pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.473458 4800 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a975dd2f-273e-4d84-8a2b-96badfae1fdb-combined-ca-bundle\") pod \"barbican-worker-7664b8b7d5-cx4ps\" (UID: \"a975dd2f-273e-4d84-8a2b-96badfae1fdb\") " pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.549190 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c65849c7f-7fcqh"] Nov 25 15:39:31 crc kubenswrapper[4800]: E1125 15:39:31.549392 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.551000 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.595140 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c65849c7f-7fcqh"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.636371 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c65849c7f-7fcqh"] Nov 25 15:39:31 crc kubenswrapper[4800]: E1125 15:39:31.636673 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-qnjcx ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" podUID="d5dab5c2-fa84-497c-8f8d-3f9a50a003fe" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.645917 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7664b8b7d5-cx4ps" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.656718 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.670975 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7776d59f89-jmmj9"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.672763 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.672938 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-dns-svc\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.672982 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-sb\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.673077 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-config\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.673155 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-nb\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.673185 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnjcx\" (UniqueName: \"kubernetes.io/projected/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-kube-api-access-qnjcx\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.700312 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-778948847d-68g7j"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.702058 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.706543 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.735522 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7776d59f89-jmmj9"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.753918 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-778948847d-68g7j"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775011 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-nb\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775088 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnjcx\" (UniqueName: \"kubernetes.io/projected/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-kube-api-access-qnjcx\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775123 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775155 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-dns-svc\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775193 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-sb\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775227 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-dns-svc\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775249 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-nb\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775283 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcg8k\" (UniqueName: 
\"kubernetes.io/projected/cc411158-08a6-449d-b84b-b33cec795519-kube-api-access-mcg8k\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775317 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data-custom\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775336 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-sb\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775361 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-config\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775384 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp25w\" (UniqueName: \"kubernetes.io/projected/6855c149-842e-4bde-b262-447fb978ffa8-kube-api-access-cp25w\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775411 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775431 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6855c149-842e-4bde-b262-447fb978ffa8-logs\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.775445 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-config\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.776531 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-nb\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.777430 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-dns-svc\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.778806 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-968b7bdb8-ttgfk"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.779082 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-sb\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.779570 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-config\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.780645 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.785618 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.785944 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.786075 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-frhp6" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.788207 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.803574 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnjcx\" (UniqueName: \"kubernetes.io/projected/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-kube-api-access-qnjcx\") pod \"dnsmasq-dns-7c65849c7f-7fcqh\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.826276 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-968b7bdb8-ttgfk"] Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881252 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881726 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6855c149-842e-4bde-b262-447fb978ffa8-logs\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881751 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-config\") pod 
\"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881852 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-ovndb-tls-certs\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881895 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881914 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-httpd-config\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881943 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-dns-svc\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.881967 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-nb\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.882008 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcg8k\" (UniqueName: \"kubernetes.io/projected/cc411158-08a6-449d-b84b-b33cec795519-kube-api-access-mcg8k\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.882072 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data-custom\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.882089 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f86m6\" (UniqueName: \"kubernetes.io/projected/f4aa678c-d6e5-4dac-8e75-87a5a190badb-kube-api-access-f86m6\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.882115 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-config\") pod 
\"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.882136 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-sb\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.882161 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-combined-ca-bundle\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.882192 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp25w\" (UniqueName: \"kubernetes.io/projected/6855c149-842e-4bde-b262-447fb978ffa8-kube-api-access-cp25w\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.885364 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6855c149-842e-4bde-b262-447fb978ffa8-logs\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.887873 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-dns-svc\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.895404 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.895405 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data-custom\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.897399 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.898204 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-nb\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " 
pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.898703 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-sb\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.899201 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-config\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.910513 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp25w\" (UniqueName: \"kubernetes.io/projected/6855c149-842e-4bde-b262-447fb978ffa8-kube-api-access-cp25w\") pod \"barbican-api-778948847d-68g7j\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.913563 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcg8k\" (UniqueName: \"kubernetes.io/projected/cc411158-08a6-449d-b84b-b33cec795519-kube-api-access-mcg8k\") pod \"dnsmasq-dns-7776d59f89-jmmj9\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.985388 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-ovndb-tls-certs\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.985484 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-httpd-config\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.985576 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f86m6\" (UniqueName: \"kubernetes.io/projected/f4aa678c-d6e5-4dac-8e75-87a5a190badb-kube-api-access-f86m6\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.985608 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-config\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:31 crc kubenswrapper[4800]: I1125 15:39:31.985635 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-combined-ca-bundle\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:31.999548 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-httpd-config\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:31.999825 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-combined-ca-bundle\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.001640 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-ovndb-tls-certs\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.001718 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-config\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.008597 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f86m6\" (UniqueName: \"kubernetes.io/projected/f4aa678c-d6e5-4dac-8e75-87a5a190badb-kube-api-access-f86m6\") pod \"neutron-968b7bdb8-ttgfk\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") " pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.029554 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.059740 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.110475 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.321379 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5d486dc894-hwkxc"] Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.372264 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" event={"ID":"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8","Type":"ContainerStarted","Data":"bdd19c78bb711a44cb1dcf71eb06498b25d94bad287556a43a4d777f1a25cce0"} Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.388662 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.389050 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerStarted","Data":"2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f"} Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.389260 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="ceilometer-notification-agent" containerID="cri-o://2e92aa247f5af8970ac02e53900aa1757ae3a0803117d919a059b06bac2e6823" gracePeriod=30 Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.389351 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.389905 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="proxy-httpd" containerID="cri-o://2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f" gracePeriod=30 Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.389947 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="sg-core" containerID="cri-o://ffdc5ebf574e50e11ff0566e05670dcabd2d824fd1a572875a75f61fe94a806f" gracePeriod=30 Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.424304 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.471371 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7664b8b7d5-cx4ps"] Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.500685 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-nb\") pod \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.500757 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-config\") pod \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.500977 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnjcx\" (UniqueName: \"kubernetes.io/projected/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-kube-api-access-qnjcx\") pod \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.501156 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-sb\") pod \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.501241 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-dns-svc\") pod \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\" (UID: \"d5dab5c2-fa84-497c-8f8d-3f9a50a003fe\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.501487 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe" (UID: "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.502006 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe" (UID: "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.502294 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-config" (OuterVolumeSpecName: "config") pod "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe" (UID: "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.502455 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe" (UID: "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.502588 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.502604 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.502618 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.502630 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.512614 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-kube-api-access-qnjcx" (OuterVolumeSpecName: "kube-api-access-qnjcx") pod "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe" (UID: "d5dab5c2-fa84-497c-8f8d-3f9a50a003fe"). InnerVolumeSpecName "kube-api-access-qnjcx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.604485 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnjcx\" (UniqueName: \"kubernetes.io/projected/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe-kube-api-access-qnjcx\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.693321 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-778948847d-68g7j"] Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.833876 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7776d59f89-jmmj9"] Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.879546 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-2d48v" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.918945 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-config-data\") pod \"15830745-aef8-4482-8885-6a5969795af6\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.919117 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-combined-ca-bundle\") pod \"15830745-aef8-4482-8885-6a5969795af6\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.919198 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fjlr\" (UniqueName: \"kubernetes.io/projected/15830745-aef8-4482-8885-6a5969795af6-kube-api-access-9fjlr\") pod \"15830745-aef8-4482-8885-6a5969795af6\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.919228 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-scripts\") pod \"15830745-aef8-4482-8885-6a5969795af6\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.919262 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15830745-aef8-4482-8885-6a5969795af6-etc-machine-id\") pod \"15830745-aef8-4482-8885-6a5969795af6\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.919386 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-db-sync-config-data\") pod \"15830745-aef8-4482-8885-6a5969795af6\" (UID: \"15830745-aef8-4482-8885-6a5969795af6\") " Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.921522 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15830745-aef8-4482-8885-6a5969795af6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "15830745-aef8-4482-8885-6a5969795af6" (UID: "15830745-aef8-4482-8885-6a5969795af6"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.926053 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "15830745-aef8-4482-8885-6a5969795af6" (UID: "15830745-aef8-4482-8885-6a5969795af6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.929567 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15830745-aef8-4482-8885-6a5969795af6-kube-api-access-9fjlr" (OuterVolumeSpecName: "kube-api-access-9fjlr") pod "15830745-aef8-4482-8885-6a5969795af6" (UID: "15830745-aef8-4482-8885-6a5969795af6"). InnerVolumeSpecName "kube-api-access-9fjlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:32 crc kubenswrapper[4800]: I1125 15:39:32.932429 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-scripts" (OuterVolumeSpecName: "scripts") pod "15830745-aef8-4482-8885-6a5969795af6" (UID: "15830745-aef8-4482-8885-6a5969795af6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.022026 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fjlr\" (UniqueName: \"kubernetes.io/projected/15830745-aef8-4482-8885-6a5969795af6-kube-api-access-9fjlr\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.022066 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.022076 4800 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15830745-aef8-4482-8885-6a5969795af6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.022085 4800 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.027279 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-config-data" (OuterVolumeSpecName: "config-data") pod "15830745-aef8-4482-8885-6a5969795af6" (UID: "15830745-aef8-4482-8885-6a5969795af6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.028153 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15830745-aef8-4482-8885-6a5969795af6" (UID: "15830745-aef8-4482-8885-6a5969795af6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.124366 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.124810 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15830745-aef8-4482-8885-6a5969795af6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.366224 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-968b7bdb8-ttgfk"] Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.407419 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-778948847d-68g7j" event={"ID":"6855c149-842e-4bde-b262-447fb978ffa8","Type":"ContainerStarted","Data":"bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.407490 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-778948847d-68g7j" event={"ID":"6855c149-842e-4bde-b262-447fb978ffa8","Type":"ContainerStarted","Data":"1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.407504 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-778948847d-68g7j" event={"ID":"6855c149-842e-4bde-b262-447fb978ffa8","Type":"ContainerStarted","Data":"50f1ab51cb0d2451340e6512a358a03aa1266c273f4feaf77ee2ee82fd01cda5"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.408636 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.408663 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.428217 4800 generic.go:334] "Generic (PLEG): container finished" podID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerID="2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f" exitCode=0 Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.428249 4800 generic.go:334] "Generic (PLEG): container finished" podID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerID="ffdc5ebf574e50e11ff0566e05670dcabd2d824fd1a572875a75f61fe94a806f" exitCode=2 Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.428316 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerDied","Data":"2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.428350 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerDied","Data":"ffdc5ebf574e50e11ff0566e05670dcabd2d824fd1a572875a75f61fe94a806f"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.437312 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-968b7bdb8-ttgfk" event={"ID":"f4aa678c-d6e5-4dac-8e75-87a5a190badb","Type":"ContainerStarted","Data":"52fe29b64bbf31f0cb55549ca701b07c8aa09cd1e196526f80aaa441431da3aa"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.441260 4800 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-778948847d-68g7j" podStartSLOduration=2.441237225 podStartE2EDuration="2.441237225s" podCreationTimestamp="2025-11-25 15:39:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:33.434410951 +0000 UTC m=+1334.488819433" watchObservedRunningTime="2025-11-25 15:39:33.441237225 +0000 UTC m=+1334.495645707" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.441328 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7664b8b7d5-cx4ps" event={"ID":"a975dd2f-273e-4d84-8a2b-96badfae1fdb","Type":"ContainerStarted","Data":"3075e7b65d1d5b2e3ad1ec0ec40bb3a04ba46a658f0e3161a55ba1c11523eb88"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.443050 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2d48v" event={"ID":"15830745-aef8-4482-8885-6a5969795af6","Type":"ContainerDied","Data":"a65000abd7aa2634e232fc030fdd92b55d0c49b5c047748227e6a156fe3b6385"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.443078 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a65000abd7aa2634e232fc030fdd92b55d0c49b5c047748227e6a156fe3b6385" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.443140 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-2d48v" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.457304 4800 generic.go:334] "Generic (PLEG): container finished" podID="cc411158-08a6-449d-b84b-b33cec795519" containerID="02ff0fa4528bcaeac769f6ce79b7f7e75e81c5e403fa166890c1abb666c54243" exitCode=0 Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.457423 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c65849c7f-7fcqh" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.459676 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" event={"ID":"cc411158-08a6-449d-b84b-b33cec795519","Type":"ContainerDied","Data":"02ff0fa4528bcaeac769f6ce79b7f7e75e81c5e403fa166890c1abb666c54243"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.459739 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" event={"ID":"cc411158-08a6-449d-b84b-b33cec795519","Type":"ContainerStarted","Data":"97c64f8a0fd28409def865382b4f36384477959c1976d7744cda87a4f4f38da9"} Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.599315 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c65849c7f-7fcqh"] Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.611299 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c65849c7f-7fcqh"] Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.809959 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5dab5c2-fa84-497c-8f8d-3f9a50a003fe" path="/var/lib/kubelet/pods/d5dab5c2-fa84-497c-8f8d-3f9a50a003fe/volumes" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.877497 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:33 crc kubenswrapper[4800]: E1125 15:39:33.878041 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15830745-aef8-4482-8885-6a5969795af6" containerName="cinder-db-sync" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.878062 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="15830745-aef8-4482-8885-6a5969795af6" containerName="cinder-db-sync" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.878222 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="15830745-aef8-4482-8885-6a5969795af6" containerName="cinder-db-sync" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.879268 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.889735 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8pz6v" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.889996 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.890179 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.890307 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.926331 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.961142 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.961256 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.961281 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.961298 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1589827e-3ef5-4aea-bbfd-2a783d6deb83-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.961324 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9qtd\" (UniqueName: \"kubernetes.io/projected/1589827e-3ef5-4aea-bbfd-2a783d6deb83-kube-api-access-w9qtd\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:33 crc kubenswrapper[4800]: I1125 15:39:33.961347 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-scripts\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.048030 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7776d59f89-jmmj9"] Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.064852 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.064954 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.064979 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.064998 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1589827e-3ef5-4aea-bbfd-2a783d6deb83-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.065020 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9qtd\" (UniqueName: \"kubernetes.io/projected/1589827e-3ef5-4aea-bbfd-2a783d6deb83-kube-api-access-w9qtd\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.065043 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-scripts\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.068118 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1589827e-3ef5-4aea-bbfd-2a783d6deb83-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.078215 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.094317 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-scripts\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.101237 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.111569 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.120740 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9qtd\" (UniqueName: \"kubernetes.io/projected/1589827e-3ef5-4aea-bbfd-2a783d6deb83-kube-api-access-w9qtd\") pod \"cinder-scheduler-0\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.129926 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bc89f58d7-cghtv"] Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.131584 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.180350 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bc89f58d7-cghtv"] Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.212355 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.273285 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-config\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.273368 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-sb\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.273393 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g6vl\" (UniqueName: \"kubernetes.io/projected/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-kube-api-access-9g6vl\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.273455 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-dns-svc\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.273484 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-nb\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.292867 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 
15:39:34.295041 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.306500 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.352030 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380183 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-dns-svc\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380241 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-nb\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380305 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b78e12a-b4be-4beb-a685-52ab93730f55-logs\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380346 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380420 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1b78e12a-b4be-4beb-a685-52ab93730f55-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380449 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-scripts\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380474 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data-custom\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380514 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-config\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380565 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380592 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6t66\" (UniqueName: \"kubernetes.io/projected/1b78e12a-b4be-4beb-a685-52ab93730f55-kube-api-access-b6t66\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380619 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-sb\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.380646 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g6vl\" (UniqueName: \"kubernetes.io/projected/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-kube-api-access-9g6vl\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.382339 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-dns-svc\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.382938 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-nb\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.383576 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-config\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.384381 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-sb\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.414912 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g6vl\" (UniqueName: \"kubernetes.io/projected/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-kube-api-access-9g6vl\") pod \"dnsmasq-dns-7bc89f58d7-cghtv\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.499303 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/1b78e12a-b4be-4beb-a685-52ab93730f55-logs\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.499386 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.499517 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1b78e12a-b4be-4beb-a685-52ab93730f55-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.499555 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-scripts\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.499582 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data-custom\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.499687 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.499714 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6t66\" (UniqueName: \"kubernetes.io/projected/1b78e12a-b4be-4beb-a685-52ab93730f55-kube-api-access-b6t66\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.500190 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1b78e12a-b4be-4beb-a685-52ab93730f55-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.501626 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b78e12a-b4be-4beb-a685-52ab93730f55-logs\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.506524 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-scripts\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.507596 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.510338 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data-custom\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.523969 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6t66\" (UniqueName: \"kubernetes.io/projected/1b78e12a-b4be-4beb-a685-52ab93730f55-kube-api-access-b6t66\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.527580 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data\") pod \"cinder-api-0\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " pod="openstack/cinder-api-0" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.544872 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" event={"ID":"cc411158-08a6-449d-b84b-b33cec795519","Type":"ContainerStarted","Data":"0db778ce6ee3b82bff973b1604ae2f400ff5c0b602821a6e2134c63c37a6cdcd"} Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.545173 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" podUID="cc411158-08a6-449d-b84b-b33cec795519" containerName="dnsmasq-dns" containerID="cri-o://0db778ce6ee3b82bff973b1604ae2f400ff5c0b602821a6e2134c63c37a6cdcd" gracePeriod=10 Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.545602 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.559378 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-968b7bdb8-ttgfk" event={"ID":"f4aa678c-d6e5-4dac-8e75-87a5a190badb","Type":"ContainerStarted","Data":"d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6"} Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.561427 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.579820 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" podStartSLOduration=3.579797595 podStartE2EDuration="3.579797595s" podCreationTimestamp="2025-11-25 15:39:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:34.572731903 +0000 UTC m=+1335.627140385" watchObservedRunningTime="2025-11-25 15:39:34.579797595 +0000 UTC m=+1335.634206077" Nov 25 15:39:34 crc kubenswrapper[4800]: I1125 15:39:34.746544 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:34.986014 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:35.102683 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bc89f58d7-cghtv"] Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:35.481021 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:35.567949 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-968b7bdb8-ttgfk" event={"ID":"f4aa678c-d6e5-4dac-8e75-87a5a190badb","Type":"ContainerStarted","Data":"2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447"} Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:35.569025 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-968b7bdb8-ttgfk" Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:35.572345 4800 generic.go:334] "Generic (PLEG): container finished" podID="cc411158-08a6-449d-b84b-b33cec795519" containerID="0db778ce6ee3b82bff973b1604ae2f400ff5c0b602821a6e2134c63c37a6cdcd" exitCode=0 Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:35.572526 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" event={"ID":"cc411158-08a6-449d-b84b-b33cec795519","Type":"ContainerDied","Data":"0db778ce6ee3b82bff973b1604ae2f400ff5c0b602821a6e2134c63c37a6cdcd"} Nov 25 15:39:35 crc kubenswrapper[4800]: I1125 15:39:35.604575 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-968b7bdb8-ttgfk" podStartSLOduration=4.604552728 podStartE2EDuration="4.604552728s" podCreationTimestamp="2025-11-25 15:39:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:35.602334889 +0000 UTC m=+1336.656743371" watchObservedRunningTime="2025-11-25 15:39:35.604552728 +0000 UTC m=+1336.658961210" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.153603 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.158515 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-nb\") pod \"cc411158-08a6-449d-b84b-b33cec795519\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.158626 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-dns-svc\") pod \"cc411158-08a6-449d-b84b-b33cec795519\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.158680 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-config\") pod \"cc411158-08a6-449d-b84b-b33cec795519\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.158720 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-sb\") pod \"cc411158-08a6-449d-b84b-b33cec795519\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.158759 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcg8k\" (UniqueName: \"kubernetes.io/projected/cc411158-08a6-449d-b84b-b33cec795519-kube-api-access-mcg8k\") pod \"cc411158-08a6-449d-b84b-b33cec795519\" (UID: \"cc411158-08a6-449d-b84b-b33cec795519\") " Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.241312 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc411158-08a6-449d-b84b-b33cec795519-kube-api-access-mcg8k" (OuterVolumeSpecName: "kube-api-access-mcg8k") pod "cc411158-08a6-449d-b84b-b33cec795519" (UID: "cc411158-08a6-449d-b84b-b33cec795519"). InnerVolumeSpecName "kube-api-access-mcg8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.262421 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcg8k\" (UniqueName: \"kubernetes.io/projected/cc411158-08a6-449d-b84b-b33cec795519-kube-api-access-mcg8k\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.303983 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cc411158-08a6-449d-b84b-b33cec795519" (UID: "cc411158-08a6-449d-b84b-b33cec795519"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.325231 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cc411158-08a6-449d-b84b-b33cec795519" (UID: "cc411158-08a6-449d-b84b-b33cec795519"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.333184 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-config" (OuterVolumeSpecName: "config") pod "cc411158-08a6-449d-b84b-b33cec795519" (UID: "cc411158-08a6-449d-b84b-b33cec795519"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.334712 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cc411158-08a6-449d-b84b-b33cec795519" (UID: "cc411158-08a6-449d-b84b-b33cec795519"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.364711 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.364765 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.364775 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.364786 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc411158-08a6-449d-b84b-b33cec795519-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.583398 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1b78e12a-b4be-4beb-a685-52ab93730f55","Type":"ContainerStarted","Data":"570483f8174e076ed7c8838b122543c4109103548b09f138c5edde87a697ec57"} Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.585705 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1589827e-3ef5-4aea-bbfd-2a783d6deb83","Type":"ContainerStarted","Data":"7600362049a0dd5fe4466d687daeadfe122c5abce8cddeaf7644599efa31a4c8"} Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.587821 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" event={"ID":"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0","Type":"ContainerStarted","Data":"053867d2a57ce3ea9b551af3336bc647c8af959ed3c36779cf29819427664b9a"} Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.590524 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" event={"ID":"cc411158-08a6-449d-b84b-b33cec795519","Type":"ContainerDied","Data":"97c64f8a0fd28409def865382b4f36384477959c1976d7744cda87a4f4f38da9"} Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.590556 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7776d59f89-jmmj9" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.590781 4800 scope.go:117] "RemoveContainer" containerID="0db778ce6ee3b82bff973b1604ae2f400ff5c0b602821a6e2134c63c37a6cdcd" Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.596856 4800 generic.go:334] "Generic (PLEG): container finished" podID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerID="2e92aa247f5af8970ac02e53900aa1757ae3a0803117d919a059b06bac2e6823" exitCode=0 Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.600377 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerDied","Data":"2e92aa247f5af8970ac02e53900aa1757ae3a0803117d919a059b06bac2e6823"} Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.642381 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7776d59f89-jmmj9"] Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.652386 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7776d59f89-jmmj9"] Nov 25 15:39:36 crc kubenswrapper[4800]: I1125 15:39:36.932913 4800 scope.go:117] "RemoveContainer" containerID="02ff0fa4528bcaeac769f6ce79b7f7e75e81c5e403fa166890c1abb666c54243" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.532776 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.623564 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.648772 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8","Type":"ContainerDied","Data":"e50bb773dfba6651cad6a6fefc1f7afec1dbadd8ac252ffad5b2638fec8c882b"} Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.648870 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.648877 4800 scope.go:117] "RemoveContainer" containerID="2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.706305 4800 scope.go:117] "RemoveContainer" containerID="ffdc5ebf574e50e11ff0566e05670dcabd2d824fd1a572875a75f61fe94a806f" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.718831 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-log-httpd\") pod \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.718961 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-config-data\") pod \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.719003 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-combined-ca-bundle\") pod \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.719050 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkfsl\" (UniqueName: \"kubernetes.io/projected/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-kube-api-access-dkfsl\") pod \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.719225 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-scripts\") pod \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.719327 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-sg-core-conf-yaml\") pod \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.719361 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-run-httpd\") pod \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\" (UID: \"445ab78d-8d8a-4f60-8daa-3fab07b4dfa8\") " Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.721033 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" (UID: "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.721937 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" (UID: "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.730603 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-scripts" (OuterVolumeSpecName: "scripts") pod "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" (UID: "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.746912 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-kube-api-access-dkfsl" (OuterVolumeSpecName: "kube-api-access-dkfsl") pod "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" (UID: "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8"). InnerVolumeSpecName "kube-api-access-dkfsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.835438 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.835463 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.835472 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.835486 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkfsl\" (UniqueName: \"kubernetes.io/projected/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-kube-api-access-dkfsl\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.837891 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" (UID: "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.853043 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" (UID: "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.857061 4800 scope.go:117] "RemoveContainer" containerID="2e92aa247f5af8970ac02e53900aa1757ae3a0803117d919a059b06bac2e6823" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.864038 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc411158-08a6-449d-b84b-b33cec795519" path="/var/lib/kubelet/pods/cc411158-08a6-449d-b84b-b33cec795519/volumes" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.879283 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-97869bf49-tc9dz"] Nov 25 15:39:37 crc kubenswrapper[4800]: E1125 15:39:37.879735 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="sg-core" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.879752 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="sg-core" Nov 25 15:39:37 crc kubenswrapper[4800]: E1125 15:39:37.879775 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="proxy-httpd" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.879784 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="proxy-httpd" Nov 25 15:39:37 crc kubenswrapper[4800]: E1125 15:39:37.879816 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="ceilometer-notification-agent" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.879823 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="ceilometer-notification-agent" Nov 25 15:39:37 crc kubenswrapper[4800]: E1125 15:39:37.879834 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc411158-08a6-449d-b84b-b33cec795519" containerName="dnsmasq-dns" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.879857 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc411158-08a6-449d-b84b-b33cec795519" containerName="dnsmasq-dns" Nov 25 15:39:37 crc kubenswrapper[4800]: E1125 15:39:37.879871 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc411158-08a6-449d-b84b-b33cec795519" containerName="init" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.879878 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc411158-08a6-449d-b84b-b33cec795519" containerName="init" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.880124 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="proxy-httpd" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.880160 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="ceilometer-notification-agent" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.880171 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" containerName="sg-core" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.880183 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc411158-08a6-449d-b84b-b33cec795519" containerName="dnsmasq-dns" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.881233 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/neutron-97869bf49-tc9dz"] Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.881347 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.899983 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.900235 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.914768 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-config-data" (OuterVolumeSpecName: "config-data") pod "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" (UID: "445ab78d-8d8a-4f60-8daa-3fab07b4dfa8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.939246 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-public-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.939383 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-config\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.939575 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x82gq\" (UniqueName: \"kubernetes.io/projected/331a354f-72b7-47a2-8cd4-212972eada6b-kube-api-access-x82gq\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.939620 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-internal-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.939791 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-httpd-config\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.939822 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-ovndb-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.948367 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-combined-ca-bundle\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.948468 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.948486 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:37 crc kubenswrapper[4800]: I1125 15:39:37.948495 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.038563 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.050862 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-httpd-config\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.050928 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-ovndb-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.050987 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-combined-ca-bundle\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.051048 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-public-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.051085 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-config\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.051148 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x82gq\" (UniqueName: \"kubernetes.io/projected/331a354f-72b7-47a2-8cd4-212972eada6b-kube-api-access-x82gq\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.051176 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-internal-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.058697 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-internal-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.065192 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.068072 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-httpd-config\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.069113 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-combined-ca-bundle\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.112728 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-config\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.129349 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-ovndb-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.146996 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x82gq\" (UniqueName: \"kubernetes.io/projected/331a354f-72b7-47a2-8cd4-212972eada6b-kube-api-access-x82gq\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.147018 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/331a354f-72b7-47a2-8cd4-212972eada6b-public-tls-certs\") pod \"neutron-97869bf49-tc9dz\" (UID: \"331a354f-72b7-47a2-8cd4-212972eada6b\") " pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.221720 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.248864 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.257136 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.257304 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.265377 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.267207 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.389040 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.389114 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-config-data\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.389139 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-scripts\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.389164 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-log-httpd\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.389234 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-run-httpd\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.389266 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhlkj\" (UniqueName: \"kubernetes.io/projected/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-kube-api-access-fhlkj\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.389301 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.491279 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.491788 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-config-data\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.491815 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-scripts\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.491833 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-log-httpd\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.491897 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-run-httpd\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.491921 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhlkj\" (UniqueName: \"kubernetes.io/projected/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-kube-api-access-fhlkj\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.491951 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.493545 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-log-httpd\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.493589 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-run-httpd\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.505836 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-config-data\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.507278 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.507807 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.508610 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-scripts\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.519733 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhlkj\" (UniqueName: \"kubernetes.io/projected/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-kube-api-access-fhlkj\") pod \"ceilometer-0\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.688135 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.745745 4800 generic.go:334] "Generic (PLEG): container finished" podID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerID="5ad192cd906564c07f024661ac15d47dcd815d439033125822237908eb3746c0" exitCode=0 Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.745858 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" event={"ID":"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0","Type":"ContainerDied","Data":"5ad192cd906564c07f024661ac15d47dcd815d439033125822237908eb3746c0"} Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.758139 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1b78e12a-b4be-4beb-a685-52ab93730f55","Type":"ContainerStarted","Data":"ebf3fe36beb419ce176fe4595d780b0adfcab20f29e5250f830f9baa308e2706"} Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.760272 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7664b8b7d5-cx4ps" event={"ID":"a975dd2f-273e-4d84-8a2b-96badfae1fdb","Type":"ContainerStarted","Data":"b7125a3a02f6e6a76ec1ab3b19bf6440cdb901efae7246e088f846af68d220a6"} Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.760324 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7664b8b7d5-cx4ps" event={"ID":"a975dd2f-273e-4d84-8a2b-96badfae1fdb","Type":"ContainerStarted","Data":"0269fec27c7c8b756a68900512e6124db2a854ad06bd4ff6b70fc3969c65a56e"} Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.772701 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1589827e-3ef5-4aea-bbfd-2a783d6deb83","Type":"ContainerStarted","Data":"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a"} Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.779828 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" event={"ID":"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8","Type":"ContainerStarted","Data":"a1faef2098938a4df0fdd6513fa39e8ebe57a37b02ec7f7faa4529262f0d021e"} Nov 25 15:39:38 
crc kubenswrapper[4800]: I1125 15:39:38.779919 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" event={"ID":"c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8","Type":"ContainerStarted","Data":"59be7d11ef3452052a7eca9ab54d3bb897ca0bc1c81fb964bde40700cade6e34"} Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.789816 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7664b8b7d5-cx4ps" podStartSLOduration=3.279614975 podStartE2EDuration="7.789762977s" podCreationTimestamp="2025-11-25 15:39:31 +0000 UTC" firstStartedPulling="2025-11-25 15:39:32.484978099 +0000 UTC m=+1333.539386581" lastFinishedPulling="2025-11-25 15:39:36.995126101 +0000 UTC m=+1338.049534583" observedRunningTime="2025-11-25 15:39:38.783400675 +0000 UTC m=+1339.837809167" watchObservedRunningTime="2025-11-25 15:39:38.789762977 +0000 UTC m=+1339.844171459" Nov 25 15:39:38 crc kubenswrapper[4800]: I1125 15:39:38.828926 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5d486dc894-hwkxc" podStartSLOduration=3.284118897 podStartE2EDuration="7.828902028s" podCreationTimestamp="2025-11-25 15:39:31 +0000 UTC" firstStartedPulling="2025-11-25 15:39:32.350700038 +0000 UTC m=+1333.405108520" lastFinishedPulling="2025-11-25 15:39:36.895483169 +0000 UTC m=+1337.949891651" observedRunningTime="2025-11-25 15:39:38.80794931 +0000 UTC m=+1339.862357822" watchObservedRunningTime="2025-11-25 15:39:38.828902028 +0000 UTC m=+1339.883310510" Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.045557 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-97869bf49-tc9dz"] Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.311954 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.826627 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="445ab78d-8d8a-4f60-8daa-3fab07b4dfa8" path="/var/lib/kubelet/pods/445ab78d-8d8a-4f60-8daa-3fab07b4dfa8/volumes" Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.868652 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" event={"ID":"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0","Type":"ContainerStarted","Data":"57bb35409c39aa24e351375feedc485afcd89e79fa3cf6997144be05bb40d00e"} Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.869146 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.883378 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerStarted","Data":"a415deaa991234ffb292998650d9acbedd8ee7051d6d9a4588206ecf585589c2"} Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.887294 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-97869bf49-tc9dz" event={"ID":"331a354f-72b7-47a2-8cd4-212972eada6b","Type":"ContainerStarted","Data":"18fce95c42ce00f1cac151716da17ed0473d0bb2d90703bf148219c2acb44837"} Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.887326 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-97869bf49-tc9dz" event={"ID":"331a354f-72b7-47a2-8cd4-212972eada6b","Type":"ContainerStarted","Data":"9e1ce2b8a4e215fa13d76707a461b935d252617a5b44fe5ce0127d44a1b1506a"} 
Nov 25 15:39:39 crc kubenswrapper[4800]: I1125 15:39:39.973488 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" podStartSLOduration=5.973460251 podStartE2EDuration="5.973460251s" podCreationTimestamp="2025-11-25 15:39:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:39.957421395 +0000 UTC m=+1341.011829877" watchObservedRunningTime="2025-11-25 15:39:39.973460251 +0000 UTC m=+1341.027868733" Nov 25 15:39:40 crc kubenswrapper[4800]: W1125 15:39:40.058327 4800 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-conmon-2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-conmon-2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f.scope: no such file or directory Nov 25 15:39:40 crc kubenswrapper[4800]: W1125 15:39:40.058442 4800 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-2f221d7b41d5a1a6b52e94725df85e2ec9ba95e202c6814abd991db41f58d34f.scope: no such file or directory Nov 25 15:39:40 crc kubenswrapper[4800]: W1125 15:39:40.094249 4800 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5dab5c2_fa84_497c_8f8d_3f9a50a003fe.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5dab5c2_fa84_497c_8f8d_3f9a50a003fe.slice: no such file or directory Nov 25 15:39:40 crc kubenswrapper[4800]: W1125 15:39:40.094333 4800 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc411158_08a6_449d_b84b_b33cec795519.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc411158_08a6_449d_b84b_b33cec795519.slice: no such file or directory Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.127266 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:40 crc kubenswrapper[4800]: E1125 15:39:40.614746 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15830745_aef8_4482_8885_6a5969795af6.slice/crio-a65000abd7aa2634e232fc030fdd92b55d0c49b5c047748227e6a156fe3b6385\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice\": RecentStats: unable to find data in 
memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28782c8c_88d7_48d6_bd10_3b64cff49706.slice/crio-conmon-0eefb7b7fc795dec6cd9666544ce5a4b72c5db4487024f3254aa19e1f2bcbfb8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2f9ee92_fddb_49cf_bb5c_de3435545b92.slice/crio-conmon-f6cfeb997f3cbaeea0d830c43aaa2898fbee56bdd2a9022133548bbd07439c61.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2f9ee92_fddb_49cf_bb5c_de3435545b92.slice/crio-f6cfeb997f3cbaeea0d830c43aaa2898fbee56bdd2a9022133548bbd07439c61.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15830745_aef8_4482_8885_6a5969795af6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb030f9b2_f92c_40d4_b92a_7c99d4af8358.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3674fdb_30d8_402d_b9a7_419574d7a0c9.slice/crio-8a6c26c71711ce0333d299c0718aef99e8302ab268415a396bdc6386c729739a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2f9ee92_fddb_49cf_bb5c_de3435545b92.slice/crio-conmon-f59a6fc77f3bd1f7e2f580a20fdc945aaa84ebb563b1085aa79ac0ad365421bb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-conmon-ffdc5ebf574e50e11ff0566e05670dcabd2d824fd1a572875a75f61fe94a806f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbdf98904_e8fc_4c69_9dc7_5e522c269236.slice/crio-conmon-0581e5e372e3185eb1371e6b2331a60f4dd9b0ecbdfa4e8f80a28c89b503aee8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3674fdb_30d8_402d_b9a7_419574d7a0c9.slice/crio-e67de3225dbb64bd48217b8d26b37d34399cfe7dd758c0f6f64a9806a8db8ae8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28782c8c_88d7_48d6_bd10_3b64cff49706.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-2e92aa247f5af8970ac02e53900aa1757ae3a0803117d919a059b06bac2e6823.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28782c8c_88d7_48d6_bd10_3b64cff49706.slice/crio-0eefb7b7fc795dec6cd9666544ce5a4b72c5db4487024f3254aa19e1f2bcbfb8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod445ab78d_8d8a_4f60_8daa_3fab07b4dfa8.slice/crio-conmon-2e92aa247f5af8970ac02e53900aa1757ae3a0803117d919a059b06bac2e6823.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice/crio-126e343945a386573f7a985814f180bc379c483865c325a1d87fbca3fd91cda2\": RecentStats: unable to find data in memory cache]" Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.885497 4800 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.913790 4800 generic.go:334] "Generic (PLEG): container finished" podID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerID="0581e5e372e3185eb1371e6b2331a60f4dd9b0ecbdfa4e8f80a28c89b503aee8" exitCode=137 Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.914025 4800 generic.go:334] "Generic (PLEG): container finished" podID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerID="433241a89db52601539e3d6a79ced5330c09547a33c578b33156142467170def" exitCode=137 Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.914117 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5497646597-g5gld" event={"ID":"bdf98904-e8fc-4c69-9dc7-5e522c269236","Type":"ContainerDied","Data":"0581e5e372e3185eb1371e6b2331a60f4dd9b0ecbdfa4e8f80a28c89b503aee8"} Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.914206 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5497646597-g5gld" event={"ID":"bdf98904-e8fc-4c69-9dc7-5e522c269236","Type":"ContainerDied","Data":"433241a89db52601539e3d6a79ced5330c09547a33c578b33156142467170def"} Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.935925 4800 generic.go:334] "Generic (PLEG): container finished" podID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerID="f6cfeb997f3cbaeea0d830c43aaa2898fbee56bdd2a9022133548bbd07439c61" exitCode=137 Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.936325 4800 generic.go:334] "Generic (PLEG): container finished" podID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerID="f59a6fc77f3bd1f7e2f580a20fdc945aaa84ebb563b1085aa79ac0ad365421bb" exitCode=137 Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.936004 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78665cb57c-77xrd" event={"ID":"c2f9ee92-fddb-49cf-bb5c-de3435545b92","Type":"ContainerDied","Data":"f6cfeb997f3cbaeea0d830c43aaa2898fbee56bdd2a9022133548bbd07439c61"} Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.936525 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78665cb57c-77xrd" event={"ID":"c2f9ee92-fddb-49cf-bb5c-de3435545b92","Type":"ContainerDied","Data":"f59a6fc77f3bd1f7e2f580a20fdc945aaa84ebb563b1085aa79ac0ad365421bb"} Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.974645 4800 generic.go:334] "Generic (PLEG): container finished" podID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerID="e67de3225dbb64bd48217b8d26b37d34399cfe7dd758c0f6f64a9806a8db8ae8" exitCode=137 Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.974686 4800 generic.go:334] "Generic (PLEG): container finished" podID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerID="8a6c26c71711ce0333d299c0718aef99e8302ab268415a396bdc6386c729739a" exitCode=137 Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.974783 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54cdc54fcf-c45jx" event={"ID":"d3674fdb-30d8-402d-b9a7-419574d7a0c9","Type":"ContainerDied","Data":"e67de3225dbb64bd48217b8d26b37d34399cfe7dd758c0f6f64a9806a8db8ae8"} Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.974821 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54cdc54fcf-c45jx" event={"ID":"d3674fdb-30d8-402d-b9a7-419574d7a0c9","Type":"ContainerDied","Data":"8a6c26c71711ce0333d299c0718aef99e8302ab268415a396bdc6386c729739a"} Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.989677 4800 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/neutron-97869bf49-tc9dz" event={"ID":"331a354f-72b7-47a2-8cd4-212972eada6b","Type":"ContainerStarted","Data":"bb2b6f8a61ac5346d08401cd38281366c136c365ff5628dee9e973d7b589a8e7"} Nov 25 15:39:40 crc kubenswrapper[4800]: I1125 15:39:40.989967 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-97869bf49-tc9dz" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.016677 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1b78e12a-b4be-4beb-a685-52ab93730f55","Type":"ContainerStarted","Data":"fcb3b1ed4be7e558cc4f53d4d1ca76e63f2de39cbcc86e188165f441b59e353b"} Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.017078 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api-log" containerID="cri-o://ebf3fe36beb419ce176fe4595d780b0adfcab20f29e5250f830f9baa308e2706" gracePeriod=30 Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.019304 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.019356 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api" containerID="cri-o://fcb3b1ed4be7e558cc4f53d4d1ca76e63f2de39cbcc86e188165f441b59e353b" gracePeriod=30 Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.063666 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1589827e-3ef5-4aea-bbfd-2a783d6deb83","Type":"ContainerStarted","Data":"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d"} Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.094160 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-97869bf49-tc9dz" podStartSLOduration=4.094130085 podStartE2EDuration="4.094130085s" podCreationTimestamp="2025-11-25 15:39:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:41.068197462 +0000 UTC m=+1342.122605944" watchObservedRunningTime="2025-11-25 15:39:41.094130085 +0000 UTC m=+1342.148538567" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.128376 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.759407316 podStartE2EDuration="8.128357752s" podCreationTimestamp="2025-11-25 15:39:33 +0000 UTC" firstStartedPulling="2025-11-25 15:39:35.937501005 +0000 UTC m=+1336.991909497" lastFinishedPulling="2025-11-25 15:39:37.306451451 +0000 UTC m=+1338.360859933" observedRunningTime="2025-11-25 15:39:41.123236703 +0000 UTC m=+1342.177645185" watchObservedRunningTime="2025-11-25 15:39:41.128357752 +0000 UTC m=+1342.182766234" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.194760 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.194738392 podStartE2EDuration="7.194738392s" podCreationTimestamp="2025-11-25 15:39:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:41.159400594 +0000 UTC m=+1342.213809096" watchObservedRunningTime="2025-11-25 
15:39:41.194738392 +0000 UTC m=+1342.249146874" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.239835 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.343597 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-scripts\") pod \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.344275 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3674fdb-30d8-402d-b9a7-419574d7a0c9-horizon-secret-key\") pod \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.344323 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3674fdb-30d8-402d-b9a7-419574d7a0c9-logs\") pod \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.344353 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-config-data\") pod \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.344422 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqkd9\" (UniqueName: \"kubernetes.io/projected/d3674fdb-30d8-402d-b9a7-419574d7a0c9-kube-api-access-fqkd9\") pod \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\" (UID: \"d3674fdb-30d8-402d-b9a7-419574d7a0c9\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.347364 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3674fdb-30d8-402d-b9a7-419574d7a0c9-logs" (OuterVolumeSpecName: "logs") pod "d3674fdb-30d8-402d-b9a7-419574d7a0c9" (UID: "d3674fdb-30d8-402d-b9a7-419574d7a0c9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.380196 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3674fdb-30d8-402d-b9a7-419574d7a0c9-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d3674fdb-30d8-402d-b9a7-419574d7a0c9" (UID: "d3674fdb-30d8-402d-b9a7-419574d7a0c9"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.385122 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3674fdb-30d8-402d-b9a7-419574d7a0c9-kube-api-access-fqkd9" (OuterVolumeSpecName: "kube-api-access-fqkd9") pod "d3674fdb-30d8-402d-b9a7-419574d7a0c9" (UID: "d3674fdb-30d8-402d-b9a7-419574d7a0c9"). InnerVolumeSpecName "kube-api-access-fqkd9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.448088 4800 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3674fdb-30d8-402d-b9a7-419574d7a0c9-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.448378 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3674fdb-30d8-402d-b9a7-419574d7a0c9-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.448451 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqkd9\" (UniqueName: \"kubernetes.io/projected/d3674fdb-30d8-402d-b9a7-419574d7a0c9-kube-api-access-fqkd9\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.448730 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-config-data" (OuterVolumeSpecName: "config-data") pod "d3674fdb-30d8-402d-b9a7-419574d7a0c9" (UID: "d3674fdb-30d8-402d-b9a7-419574d7a0c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.453541 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-scripts" (OuterVolumeSpecName: "scripts") pod "d3674fdb-30d8-402d-b9a7-419574d7a0c9" (UID: "d3674fdb-30d8-402d-b9a7-419574d7a0c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.459068 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5497646597-g5gld" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.471794 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.550665 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-scripts\") pod \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.550809 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcw4c\" (UniqueName: \"kubernetes.io/projected/c2f9ee92-fddb-49cf-bb5c-de3435545b92-kube-api-access-pcw4c\") pod \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.550860 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-scripts\") pod \"bdf98904-e8fc-4c69-9dc7-5e522c269236\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551012 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2f9ee92-fddb-49cf-bb5c-de3435545b92-logs\") pod \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551045 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5pk4\" (UniqueName: \"kubernetes.io/projected/bdf98904-e8fc-4c69-9dc7-5e522c269236-kube-api-access-w5pk4\") pod \"bdf98904-e8fc-4c69-9dc7-5e522c269236\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551125 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-config-data\") pod \"bdf98904-e8fc-4c69-9dc7-5e522c269236\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551221 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bdf98904-e8fc-4c69-9dc7-5e522c269236-horizon-secret-key\") pod \"bdf98904-e8fc-4c69-9dc7-5e522c269236\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551252 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf98904-e8fc-4c69-9dc7-5e522c269236-logs\") pod \"bdf98904-e8fc-4c69-9dc7-5e522c269236\" (UID: \"bdf98904-e8fc-4c69-9dc7-5e522c269236\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551313 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-config-data\") pod \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\" (UID: \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551335 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2f9ee92-fddb-49cf-bb5c-de3435545b92-horizon-secret-key\") pod \"c2f9ee92-fddb-49cf-bb5c-de3435545b92\" (UID: 
\"c2f9ee92-fddb-49cf-bb5c-de3435545b92\") " Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551772 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.551789 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3674fdb-30d8-402d-b9a7-419574d7a0c9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.566351 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2f9ee92-fddb-49cf-bb5c-de3435545b92-kube-api-access-pcw4c" (OuterVolumeSpecName: "kube-api-access-pcw4c") pod "c2f9ee92-fddb-49cf-bb5c-de3435545b92" (UID: "c2f9ee92-fddb-49cf-bb5c-de3435545b92"). InnerVolumeSpecName "kube-api-access-pcw4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.566459 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2f9ee92-fddb-49cf-bb5c-de3435545b92-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c2f9ee92-fddb-49cf-bb5c-de3435545b92" (UID: "c2f9ee92-fddb-49cf-bb5c-de3435545b92"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.567031 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdf98904-e8fc-4c69-9dc7-5e522c269236-logs" (OuterVolumeSpecName: "logs") pod "bdf98904-e8fc-4c69-9dc7-5e522c269236" (UID: "bdf98904-e8fc-4c69-9dc7-5e522c269236"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.603922 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2f9ee92-fddb-49cf-bb5c-de3435545b92-logs" (OuterVolumeSpecName: "logs") pod "c2f9ee92-fddb-49cf-bb5c-de3435545b92" (UID: "c2f9ee92-fddb-49cf-bb5c-de3435545b92"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.606167 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdf98904-e8fc-4c69-9dc7-5e522c269236-kube-api-access-w5pk4" (OuterVolumeSpecName: "kube-api-access-w5pk4") pod "bdf98904-e8fc-4c69-9dc7-5e522c269236" (UID: "bdf98904-e8fc-4c69-9dc7-5e522c269236"). InnerVolumeSpecName "kube-api-access-w5pk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.609112 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf98904-e8fc-4c69-9dc7-5e522c269236-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "bdf98904-e8fc-4c69-9dc7-5e522c269236" (UID: "bdf98904-e8fc-4c69-9dc7-5e522c269236"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.627836 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-config-data" (OuterVolumeSpecName: "config-data") pod "c2f9ee92-fddb-49cf-bb5c-de3435545b92" (UID: "c2f9ee92-fddb-49cf-bb5c-de3435545b92"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.628341 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-scripts" (OuterVolumeSpecName: "scripts") pod "c2f9ee92-fddb-49cf-bb5c-de3435545b92" (UID: "c2f9ee92-fddb-49cf-bb5c-de3435545b92"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.628416 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-config-data" (OuterVolumeSpecName: "config-data") pod "bdf98904-e8fc-4c69-9dc7-5e522c269236" (UID: "bdf98904-e8fc-4c69-9dc7-5e522c269236"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.642071 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-scripts" (OuterVolumeSpecName: "scripts") pod "bdf98904-e8fc-4c69-9dc7-5e522c269236" (UID: "bdf98904-e8fc-4c69-9dc7-5e522c269236"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.653883 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.653933 4800 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2f9ee92-fddb-49cf-bb5c-de3435545b92-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.653952 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2f9ee92-fddb-49cf-bb5c-de3435545b92-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.653969 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcw4c\" (UniqueName: \"kubernetes.io/projected/c2f9ee92-fddb-49cf-bb5c-de3435545b92-kube-api-access-pcw4c\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.653982 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.653996 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5pk4\" (UniqueName: \"kubernetes.io/projected/bdf98904-e8fc-4c69-9dc7-5e522c269236-kube-api-access-w5pk4\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.654007 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2f9ee92-fddb-49cf-bb5c-de3435545b92-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.654018 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdf98904-e8fc-4c69-9dc7-5e522c269236-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.654030 4800 
reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bdf98904-e8fc-4c69-9dc7-5e522c269236-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.654041 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf98904-e8fc-4c69-9dc7-5e522c269236-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:41 crc kubenswrapper[4800]: I1125 15:39:41.660583 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.068682 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-68d9dc6bf6-nhg96"] Nov 25 15:39:42 crc kubenswrapper[4800]: E1125 15:39:42.069406 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069419 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: E1125 15:39:42.069444 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069451 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: E1125 15:39:42.069462 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069469 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: E1125 15:39:42.069487 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069493 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: E1125 15:39:42.069507 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069512 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: E1125 15:39:42.069524 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069530 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069693 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069703 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 
15:39:42.069719 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069735 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069745 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" containerName="horizon-log" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.069757 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" containerName="horizon" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.070721 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.080903 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.081446 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54cdc54fcf-c45jx" event={"ID":"d3674fdb-30d8-402d-b9a7-419574d7a0c9","Type":"ContainerDied","Data":"2ee82f714777795118ed874f21573e5107a2d50fc7c5bfd8cefc7e74a83c2293"} Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.081497 4800 scope.go:117] "RemoveContainer" containerID="e67de3225dbb64bd48217b8d26b37d34399cfe7dd758c0f6f64a9806a8db8ae8" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.081614 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-54cdc54fcf-c45jx" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.084653 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerStarted","Data":"382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404"} Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.089883 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.094203 4800 generic.go:334] "Generic (PLEG): container finished" podID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerID="ebf3fe36beb419ce176fe4595d780b0adfcab20f29e5250f830f9baa308e2706" exitCode=143 Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.094262 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1b78e12a-b4be-4beb-a685-52ab93730f55","Type":"ContainerDied","Data":"ebf3fe36beb419ce176fe4595d780b0adfcab20f29e5250f830f9baa308e2706"} Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.095524 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5497646597-g5gld" event={"ID":"bdf98904-e8fc-4c69-9dc7-5e522c269236","Type":"ContainerDied","Data":"ef30c8afd5c52c8c5ee0135e8e54af6e108e0ba3c1aa84b4e8596b1152b4dd1f"} Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.095604 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5497646597-g5gld" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.098175 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-78665cb57c-77xrd" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.098541 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78665cb57c-77xrd" event={"ID":"c2f9ee92-fddb-49cf-bb5c-de3435545b92","Type":"ContainerDied","Data":"1f2c74349551e4087f959d757c488b95194c30a2bbb7bbd52a5a2421690f0805"} Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.171990 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpxpk\" (UniqueName: \"kubernetes.io/projected/320fcdb8-a11c-411f-aa8c-b0c89011b857-kube-api-access-lpxpk\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.172056 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-config-data\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.172095 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/320fcdb8-a11c-411f-aa8c-b0c89011b857-logs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.172287 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-combined-ca-bundle\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.172398 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-config-data-custom\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.172493 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-public-tls-certs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.172531 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-internal-tls-certs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.192060 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68d9dc6bf6-nhg96"] Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.226054 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/horizon-54cdc54fcf-c45jx"] Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.261419 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-54cdc54fcf-c45jx"] Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.268617 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5497646597-g5gld"] Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.274778 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-config-data-custom\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.276438 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-internal-tls-certs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.276475 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-public-tls-certs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.276536 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpxpk\" (UniqueName: \"kubernetes.io/projected/320fcdb8-a11c-411f-aa8c-b0c89011b857-kube-api-access-lpxpk\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.276582 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-config-data\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.276620 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/320fcdb8-a11c-411f-aa8c-b0c89011b857-logs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.276749 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-combined-ca-bundle\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.276285 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5497646597-g5gld"] Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.280371 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78665cb57c-77xrd"] Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.280698 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/320fcdb8-a11c-411f-aa8c-b0c89011b857-logs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.283488 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-public-tls-certs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.286666 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-78665cb57c-77xrd"] Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.291295 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-combined-ca-bundle\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.294555 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-config-data-custom\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.297660 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpxpk\" (UniqueName: \"kubernetes.io/projected/320fcdb8-a11c-411f-aa8c-b0c89011b857-kube-api-access-lpxpk\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.297671 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-internal-tls-certs\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.304069 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/320fcdb8-a11c-411f-aa8c-b0c89011b857-config-data\") pod \"barbican-api-68d9dc6bf6-nhg96\" (UID: \"320fcdb8-a11c-411f-aa8c-b0c89011b857\") " pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.344981 4800 scope.go:117] "RemoveContainer" containerID="8a6c26c71711ce0333d299c0718aef99e8302ab268415a396bdc6386c729739a" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.430045 4800 scope.go:117] "RemoveContainer" containerID="0581e5e372e3185eb1371e6b2331a60f4dd9b0ecbdfa4e8f80a28c89b503aee8" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.473692 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.676632 4800 scope.go:117] "RemoveContainer" containerID="433241a89db52601539e3d6a79ced5330c09547a33c578b33156142467170def" Nov 25 15:39:42 crc kubenswrapper[4800]: I1125 15:39:42.770286 4800 scope.go:117] "RemoveContainer" containerID="f6cfeb997f3cbaeea0d830c43aaa2898fbee56bdd2a9022133548bbd07439c61" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.142061 4800 scope.go:117] "RemoveContainer" containerID="f59a6fc77f3bd1f7e2f580a20fdc945aaa84ebb563b1085aa79ac0ad365421bb" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.210154 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerStarted","Data":"3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb"} Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.245224 4800 generic.go:334] "Generic (PLEG): container finished" podID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerID="fcb3b1ed4be7e558cc4f53d4d1ca76e63f2de39cbcc86e188165f441b59e353b" exitCode=0 Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.245284 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1b78e12a-b4be-4beb-a685-52ab93730f55","Type":"ContainerDied","Data":"fcb3b1ed4be7e558cc4f53d4d1ca76e63f2de39cbcc86e188165f441b59e353b"} Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.532469 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.609318 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b78e12a-b4be-4beb-a685-52ab93730f55-logs\") pod \"1b78e12a-b4be-4beb-a685-52ab93730f55\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.609654 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data-custom\") pod \"1b78e12a-b4be-4beb-a685-52ab93730f55\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.609717 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1b78e12a-b4be-4beb-a685-52ab93730f55-etc-machine-id\") pod \"1b78e12a-b4be-4beb-a685-52ab93730f55\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.609910 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data\") pod \"1b78e12a-b4be-4beb-a685-52ab93730f55\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.609944 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6t66\" (UniqueName: \"kubernetes.io/projected/1b78e12a-b4be-4beb-a685-52ab93730f55-kube-api-access-b6t66\") pod \"1b78e12a-b4be-4beb-a685-52ab93730f55\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.610098 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-scripts\") pod \"1b78e12a-b4be-4beb-a685-52ab93730f55\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.610121 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-combined-ca-bundle\") pod \"1b78e12a-b4be-4beb-a685-52ab93730f55\" (UID: \"1b78e12a-b4be-4beb-a685-52ab93730f55\") " Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.614147 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1b78e12a-b4be-4beb-a685-52ab93730f55-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1b78e12a-b4be-4beb-a685-52ab93730f55" (UID: "1b78e12a-b4be-4beb-a685-52ab93730f55"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.615332 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b78e12a-b4be-4beb-a685-52ab93730f55-logs" (OuterVolumeSpecName: "logs") pod "1b78e12a-b4be-4beb-a685-52ab93730f55" (UID: "1b78e12a-b4be-4beb-a685-52ab93730f55"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.621010 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1b78e12a-b4be-4beb-a685-52ab93730f55" (UID: "1b78e12a-b4be-4beb-a685-52ab93730f55"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.624721 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b78e12a-b4be-4beb-a685-52ab93730f55-kube-api-access-b6t66" (OuterVolumeSpecName: "kube-api-access-b6t66") pod "1b78e12a-b4be-4beb-a685-52ab93730f55" (UID: "1b78e12a-b4be-4beb-a685-52ab93730f55"). InnerVolumeSpecName "kube-api-access-b6t66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.625691 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-scripts" (OuterVolumeSpecName: "scripts") pod "1b78e12a-b4be-4beb-a685-52ab93730f55" (UID: "1b78e12a-b4be-4beb-a685-52ab93730f55"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.670599 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b78e12a-b4be-4beb-a685-52ab93730f55" (UID: "1b78e12a-b4be-4beb-a685-52ab93730f55"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.688087 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68d9dc6bf6-nhg96"] Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.713150 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b78e12a-b4be-4beb-a685-52ab93730f55-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.713194 4800 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.713208 4800 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1b78e12a-b4be-4beb-a685-52ab93730f55-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.713216 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6t66\" (UniqueName: \"kubernetes.io/projected/1b78e12a-b4be-4beb-a685-52ab93730f55-kube-api-access-b6t66\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.713225 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.713233 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.725177 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data" (OuterVolumeSpecName: "config-data") pod "1b78e12a-b4be-4beb-a685-52ab93730f55" (UID: "1b78e12a-b4be-4beb-a685-52ab93730f55"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.800791 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdf98904-e8fc-4c69-9dc7-5e522c269236" path="/var/lib/kubelet/pods/bdf98904-e8fc-4c69-9dc7-5e522c269236/volumes" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.801719 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2f9ee92-fddb-49cf-bb5c-de3435545b92" path="/var/lib/kubelet/pods/c2f9ee92-fddb-49cf-bb5c-de3435545b92/volumes" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.802348 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3674fdb-30d8-402d-b9a7-419574d7a0c9" path="/var/lib/kubelet/pods/d3674fdb-30d8-402d-b9a7-419574d7a0c9/volumes" Nov 25 15:39:43 crc kubenswrapper[4800]: I1125 15:39:43.815572 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b78e12a-b4be-4beb-a685-52ab93730f55-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.213278 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.257320 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerStarted","Data":"2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a"} Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.260106 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.260096 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1b78e12a-b4be-4beb-a685-52ab93730f55","Type":"ContainerDied","Data":"570483f8174e076ed7c8838b122543c4109103548b09f138c5edde87a697ec57"} Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.260199 4800 scope.go:117] "RemoveContainer" containerID="fcb3b1ed4be7e558cc4f53d4d1ca76e63f2de39cbcc86e188165f441b59e353b" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.267604 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68d9dc6bf6-nhg96" event={"ID":"320fcdb8-a11c-411f-aa8c-b0c89011b857","Type":"ContainerStarted","Data":"c94f2ecf9e93df2c121e821e5698c039567e3b4bbae7e7fa5e401c8fdcf9b223"} Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.267807 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.267868 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.267882 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68d9dc6bf6-nhg96" event={"ID":"320fcdb8-a11c-411f-aa8c-b0c89011b857","Type":"ContainerStarted","Data":"c7f84fae916affa80994f30b530a4c3f6c918d871975057d02696669a298954d"} Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.267902 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68d9dc6bf6-nhg96" event={"ID":"320fcdb8-a11c-411f-aa8c-b0c89011b857","Type":"ContainerStarted","Data":"448bb37e941ef0cfc3fc9d13241437fab72881ae3e41f5ea621af7c4e04f88e2"} Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.287267 
4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.295416 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.295826 4800 scope.go:117] "RemoveContainer" containerID="ebf3fe36beb419ce176fe4595d780b0adfcab20f29e5250f830f9baa308e2706" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.314011 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:44 crc kubenswrapper[4800]: E1125 15:39:44.314437 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api-log" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.314456 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api-log" Nov 25 15:39:44 crc kubenswrapper[4800]: E1125 15:39:44.314477 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.314483 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.314685 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api-log" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.314714 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" containerName="cinder-api" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.315748 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.317048 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-68d9dc6bf6-nhg96" podStartSLOduration=2.317013046 podStartE2EDuration="2.317013046s" podCreationTimestamp="2025-11-25 15:39:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:44.305367449 +0000 UTC m=+1345.359775931" watchObservedRunningTime="2025-11-25 15:39:44.317013046 +0000 UTC m=+1345.371421528" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.319529 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.319752 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-778948847d-68g7j" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.320737 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.325116 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.354257 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428085 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-config-data-custom\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428158 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9044ea8-7c07-4552-b140-6545060d3f53-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428179 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9044ea8-7c07-4552-b140-6545060d3f53-logs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428224 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428256 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnmzx\" (UniqueName: \"kubernetes.io/projected/f9044ea8-7c07-4552-b140-6545060d3f53-kube-api-access-lnmzx\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428294 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-public-tls-certs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428343 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428409 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-config-data\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.428434 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-scripts\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.469919 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7c78ff894b-2g5wf" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.539148 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-config-data-custom\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.539314 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9044ea8-7c07-4552-b140-6545060d3f53-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.539351 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9044ea8-7c07-4552-b140-6545060d3f53-logs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.539502 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.539572 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnmzx\" (UniqueName: \"kubernetes.io/projected/f9044ea8-7c07-4552-b140-6545060d3f53-kube-api-access-lnmzx\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.539635 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-public-tls-certs\") pod \"cinder-api-0\" (UID: 
\"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.539860 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.540085 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-config-data\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.540152 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-scripts\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.546914 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9044ea8-7c07-4552-b140-6545060d3f53-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.550498 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9044ea8-7c07-4552-b140-6545060d3f53-logs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.564138 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.602634 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-config-data-custom\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.607362 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-config-data\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.614280 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-scripts\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.614572 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.614799 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-public-tls-certs\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.615766 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9044ea8-7c07-4552-b140-6545060d3f53-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.654360 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnmzx\" (UniqueName: \"kubernetes.io/projected/f9044ea8-7c07-4552-b140-6545060d3f53-kube-api-access-lnmzx\") pod \"cinder-api-0\" (UID: \"f9044ea8-7c07-4552-b140-6545060d3f53\") " pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.655391 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.823606 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.836920 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f8f5cc67-jq58c"] Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.837324 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" containerName="dnsmasq-dns" containerID="cri-o://43c886198ada75d90c0025d8df6d40776e9cb1a55970c31b8099d6e867e12085" gracePeriod=10 Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.901640 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-56fb8dbc98-w4xzj" Nov 25 15:39:44 crc kubenswrapper[4800]: I1125 15:39:44.929207 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.019170 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7c78ff894b-2g5wf"] Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.161008 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: connect: connection refused" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.282219 4800 generic.go:334] "Generic (PLEG): container finished" podID="4fb201a0-5816-4233-a048-40b018b1ad05" containerID="43c886198ada75d90c0025d8df6d40776e9cb1a55970c31b8099d6e867e12085" exitCode=0 Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.282302 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" event={"ID":"4fb201a0-5816-4233-a048-40b018b1ad05","Type":"ContainerDied","Data":"43c886198ada75d90c0025d8df6d40776e9cb1a55970c31b8099d6e867e12085"} Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.295491 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerStarted","Data":"62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb"} Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.295591 4800 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.300151 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="cinder-scheduler" containerID="cri-o://4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a" gracePeriod=30 Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.300462 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="probe" containerID="cri-o://097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d" gracePeriod=30 Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.300636 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7c78ff894b-2g5wf" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon-log" containerID="cri-o://9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55" gracePeriod=30 Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.300717 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7c78ff894b-2g5wf" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon" containerID="cri-o://d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94" gracePeriod=30 Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.468674 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.098117881 podStartE2EDuration="7.46865268s" podCreationTimestamp="2025-11-25 15:39:38 +0000 UTC" firstStartedPulling="2025-11-25 15:39:39.430271683 +0000 UTC m=+1340.484680165" lastFinishedPulling="2025-11-25 15:39:44.800806482 +0000 UTC m=+1345.855214964" observedRunningTime="2025-11-25 15:39:45.32372877 +0000 UTC m=+1346.378137252" watchObservedRunningTime="2025-11-25 15:39:45.46865268 +0000 UTC m=+1346.523061162" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.470978 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.649608 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.701593 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-nb\") pod \"4fb201a0-5816-4233-a048-40b018b1ad05\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.701726 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-949mb\" (UniqueName: \"kubernetes.io/projected/4fb201a0-5816-4233-a048-40b018b1ad05-kube-api-access-949mb\") pod \"4fb201a0-5816-4233-a048-40b018b1ad05\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.701760 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-dns-svc\") pod \"4fb201a0-5816-4233-a048-40b018b1ad05\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.701810 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-config\") pod \"4fb201a0-5816-4233-a048-40b018b1ad05\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.702136 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-sb\") pod \"4fb201a0-5816-4233-a048-40b018b1ad05\" (UID: \"4fb201a0-5816-4233-a048-40b018b1ad05\") " Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.722642 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fb201a0-5816-4233-a048-40b018b1ad05-kube-api-access-949mb" (OuterVolumeSpecName: "kube-api-access-949mb") pod "4fb201a0-5816-4233-a048-40b018b1ad05" (UID: "4fb201a0-5816-4233-a048-40b018b1ad05"). InnerVolumeSpecName "kube-api-access-949mb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.814607 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-949mb\" (UniqueName: \"kubernetes.io/projected/4fb201a0-5816-4233-a048-40b018b1ad05-kube-api-access-949mb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.832521 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4fb201a0-5816-4233-a048-40b018b1ad05" (UID: "4fb201a0-5816-4233-a048-40b018b1ad05"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.836725 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b78e12a-b4be-4beb-a685-52ab93730f55" path="/var/lib/kubelet/pods/1b78e12a-b4be-4beb-a685-52ab93730f55/volumes" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.871562 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4fb201a0-5816-4233-a048-40b018b1ad05" (UID: "4fb201a0-5816-4233-a048-40b018b1ad05"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.910442 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4fb201a0-5816-4233-a048-40b018b1ad05" (UID: "4fb201a0-5816-4233-a048-40b018b1ad05"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.917486 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.917531 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.917540 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:45 crc kubenswrapper[4800]: I1125 15:39:45.944591 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-config" (OuterVolumeSpecName: "config") pod "4fb201a0-5816-4233-a048-40b018b1ad05" (UID: "4fb201a0-5816-4233-a048-40b018b1ad05"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.021281 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fb201a0-5816-4233-a048-40b018b1ad05-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.318457 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" event={"ID":"4fb201a0-5816-4233-a048-40b018b1ad05","Type":"ContainerDied","Data":"a1091d133f4b1dd5f980ef6d4df2e7c6bb8ff304aff1c43bbb819e7d59ff9445"} Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.318525 4800 scope.go:117] "RemoveContainer" containerID="43c886198ada75d90c0025d8df6d40776e9cb1a55970c31b8099d6e867e12085" Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.318699 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f8f5cc67-jq58c" Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.342990 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f9044ea8-7c07-4552-b140-6545060d3f53","Type":"ContainerStarted","Data":"80f3438a3765b388f0842bee7fcbae2e83ae98ad4352ea02b2780cdc78b01abe"} Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.445354 4800 scope.go:117] "RemoveContainer" containerID="7d04d63feaffcf605df56834e3f9144df0aa6f7835f80afa1d7928242eb852ae" Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.479175 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f8f5cc67-jq58c"] Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.494396 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f8f5cc67-jq58c"] Nov 25 15:39:46 crc kubenswrapper[4800]: I1125 15:39:46.945074 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.074324 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-scripts\") pod \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.074427 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9qtd\" (UniqueName: \"kubernetes.io/projected/1589827e-3ef5-4aea-bbfd-2a783d6deb83-kube-api-access-w9qtd\") pod \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.074486 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data-custom\") pod \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.074524 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1589827e-3ef5-4aea-bbfd-2a783d6deb83-etc-machine-id\") pod \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.074604 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-combined-ca-bundle\") pod \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.074627 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data\") pod \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\" (UID: \"1589827e-3ef5-4aea-bbfd-2a783d6deb83\") " Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.075251 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1589827e-3ef5-4aea-bbfd-2a783d6deb83-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1589827e-3ef5-4aea-bbfd-2a783d6deb83" (UID: "1589827e-3ef5-4aea-bbfd-2a783d6deb83"). 
InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.081179 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1589827e-3ef5-4aea-bbfd-2a783d6deb83-kube-api-access-w9qtd" (OuterVolumeSpecName: "kube-api-access-w9qtd") pod "1589827e-3ef5-4aea-bbfd-2a783d6deb83" (UID: "1589827e-3ef5-4aea-bbfd-2a783d6deb83"). InnerVolumeSpecName "kube-api-access-w9qtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.081307 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-scripts" (OuterVolumeSpecName: "scripts") pod "1589827e-3ef5-4aea-bbfd-2a783d6deb83" (UID: "1589827e-3ef5-4aea-bbfd-2a783d6deb83"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.083991 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1589827e-3ef5-4aea-bbfd-2a783d6deb83" (UID: "1589827e-3ef5-4aea-bbfd-2a783d6deb83"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.149116 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1589827e-3ef5-4aea-bbfd-2a783d6deb83" (UID: "1589827e-3ef5-4aea-bbfd-2a783d6deb83"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.177829 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9qtd\" (UniqueName: \"kubernetes.io/projected/1589827e-3ef5-4aea-bbfd-2a783d6deb83-kube-api-access-w9qtd\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.177888 4800 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.177898 4800 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1589827e-3ef5-4aea-bbfd-2a783d6deb83-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.177907 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.177916 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.205957 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data" (OuterVolumeSpecName: "config-data") pod "1589827e-3ef5-4aea-bbfd-2a783d6deb83" (UID: "1589827e-3ef5-4aea-bbfd-2a783d6deb83"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.282277 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1589827e-3ef5-4aea-bbfd-2a783d6deb83-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.358600 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f9044ea8-7c07-4552-b140-6545060d3f53","Type":"ContainerStarted","Data":"9a39a97e2980d0021d6395acd773e104b3983e292093b52f0a13f0823d7fad20"} Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.362030 4800 generic.go:334] "Generic (PLEG): container finished" podID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerID="097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d" exitCode=0 Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.362079 4800 generic.go:334] "Generic (PLEG): container finished" podID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerID="4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a" exitCode=0 Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.362213 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.362294 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1589827e-3ef5-4aea-bbfd-2a783d6deb83","Type":"ContainerDied","Data":"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d"} Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.362381 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1589827e-3ef5-4aea-bbfd-2a783d6deb83","Type":"ContainerDied","Data":"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a"} Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.362401 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1589827e-3ef5-4aea-bbfd-2a783d6deb83","Type":"ContainerDied","Data":"7600362049a0dd5fe4466d687daeadfe122c5abce8cddeaf7644599efa31a4c8"} Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.362431 4800 scope.go:117] "RemoveContainer" containerID="097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.408942 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.446062 4800 scope.go:117] "RemoveContainer" containerID="4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.446819 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.455799 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:47 crc kubenswrapper[4800]: E1125 15:39:47.456455 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" containerName="dnsmasq-dns" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.456481 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" containerName="dnsmasq-dns" Nov 25 15:39:47 crc kubenswrapper[4800]: E1125 15:39:47.456497 4800 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="cinder-scheduler" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.457164 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="cinder-scheduler" Nov 25 15:39:47 crc kubenswrapper[4800]: E1125 15:39:47.457192 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="probe" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.457203 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="probe" Nov 25 15:39:47 crc kubenswrapper[4800]: E1125 15:39:47.457225 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" containerName="init" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.457240 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" containerName="init" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.457763 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="cinder-scheduler" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.457807 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" containerName="probe" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.457824 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" containerName="dnsmasq-dns" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.459600 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.462797 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.465295 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.500111 4800 scope.go:117] "RemoveContainer" containerID="097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d" Nov 25 15:39:47 crc kubenswrapper[4800]: E1125 15:39:47.500677 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d\": container with ID starting with 097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d not found: ID does not exist" containerID="097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.500715 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d"} err="failed to get container status \"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d\": rpc error: code = NotFound desc = could not find container \"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d\": container with ID starting with 097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d not found: ID does not exist" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.500745 4800 scope.go:117] "RemoveContainer" 
containerID="4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a" Nov 25 15:39:47 crc kubenswrapper[4800]: E1125 15:39:47.501242 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a\": container with ID starting with 4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a not found: ID does not exist" containerID="4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.501274 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a"} err="failed to get container status \"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a\": rpc error: code = NotFound desc = could not find container \"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a\": container with ID starting with 4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a not found: ID does not exist" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.501298 4800 scope.go:117] "RemoveContainer" containerID="097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.501980 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d"} err="failed to get container status \"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d\": rpc error: code = NotFound desc = could not find container \"097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d\": container with ID starting with 097c765bd5bd82b0c70a279b19944064386d496125c9645a7e64092756f2fd6d not found: ID does not exist" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.502002 4800 scope.go:117] "RemoveContainer" containerID="4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.503134 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a"} err="failed to get container status \"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a\": rpc error: code = NotFound desc = could not find container \"4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a\": container with ID starting with 4b80e304a573e0654067d4a3f6bb4f914c65c41e2305f076f96dafae2a4a260a not found: ID does not exist" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.587085 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.587596 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-config-data\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.587959 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.588273 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-scripts\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.588343 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7xnw\" (UniqueName: \"kubernetes.io/projected/b3409070-5204-4027-b692-201d89bbb758-kube-api-access-t7xnw\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.588577 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3409070-5204-4027-b692-201d89bbb758-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.690696 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.691159 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-scripts\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.691185 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7xnw\" (UniqueName: \"kubernetes.io/projected/b3409070-5204-4027-b692-201d89bbb758-kube-api-access-t7xnw\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.691248 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3409070-5204-4027-b692-201d89bbb758-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.691282 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.691326 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-config-data\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.691717 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3409070-5204-4027-b692-201d89bbb758-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.698604 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.698684 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.698923 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-config-data\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.700684 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3409070-5204-4027-b692-201d89bbb758-scripts\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.712950 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7xnw\" (UniqueName: \"kubernetes.io/projected/b3409070-5204-4027-b692-201d89bbb758-kube-api-access-t7xnw\") pod \"cinder-scheduler-0\" (UID: \"b3409070-5204-4027-b692-201d89bbb758\") " pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.786720 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.800917 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1589827e-3ef5-4aea-bbfd-2a783d6deb83" path="/var/lib/kubelet/pods/1589827e-3ef5-4aea-bbfd-2a783d6deb83/volumes" Nov 25 15:39:47 crc kubenswrapper[4800]: I1125 15:39:47.801821 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fb201a0-5816-4233-a048-40b018b1ad05" path="/var/lib/kubelet/pods/4fb201a0-5816-4233-a048-40b018b1ad05/volumes" Nov 25 15:39:48 crc kubenswrapper[4800]: I1125 15:39:48.248621 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 15:39:48 crc kubenswrapper[4800]: I1125 15:39:48.379780 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3409070-5204-4027-b692-201d89bbb758","Type":"ContainerStarted","Data":"7fcb09cc83fe7a3aa8ac644c47beff995a962d2f75b058b7c4aed379596ad7f0"} Nov 25 15:39:48 crc kubenswrapper[4800]: I1125 15:39:48.385186 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f9044ea8-7c07-4552-b140-6545060d3f53","Type":"ContainerStarted","Data":"ab21fbf1d63782a30b893286df58d9747df25a4d67b3ec371517abeadd4fb7e2"} Nov 25 15:39:48 crc kubenswrapper[4800]: I1125 15:39:48.386507 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 15:39:48 crc kubenswrapper[4800]: I1125 15:39:48.428130 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.428098687 podStartE2EDuration="4.428098687s" podCreationTimestamp="2025-11-25 15:39:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:48.413501261 +0000 UTC m=+1349.467909753" watchObservedRunningTime="2025-11-25 15:39:48.428098687 +0000 UTC m=+1349.482507169" Nov 25 15:39:48 crc kubenswrapper[4800]: I1125 15:39:48.529272 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7c78ff894b-2g5wf" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.138:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:35408->10.217.0.138:8443: read: connection reset by peer" Nov 25 15:39:49 crc kubenswrapper[4800]: I1125 15:39:49.425755 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3409070-5204-4027-b692-201d89bbb758","Type":"ContainerStarted","Data":"9fae2e65c2b51691b636f54e3b2f6d88fa3bdca8ab22fe9476d6c087cf734e6f"} Nov 25 15:39:49 crc kubenswrapper[4800]: I1125 15:39:49.428166 4800 generic.go:334] "Generic (PLEG): container finished" podID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerID="d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94" exitCode=0 Nov 25 15:39:49 crc kubenswrapper[4800]: I1125 15:39:49.428255 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c78ff894b-2g5wf" event={"ID":"0e341748-e3fe-4c2d-933e-fdea97ee66b6","Type":"ContainerDied","Data":"d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94"} Nov 25 15:39:50 crc kubenswrapper[4800]: I1125 15:39:50.440561 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"b3409070-5204-4027-b692-201d89bbb758","Type":"ContainerStarted","Data":"dac49b600f3bb3870dedb74cba57fbeb154aced5d00e35f9b8a517b5b31cbb54"} Nov 25 15:39:50 crc kubenswrapper[4800]: I1125 15:39:50.472880 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.472833895 podStartE2EDuration="3.472833895s" podCreationTimestamp="2025-11-25 15:39:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:39:50.46379524 +0000 UTC m=+1351.518203732" watchObservedRunningTime="2025-11-25 15:39:50.472833895 +0000 UTC m=+1351.527242377" Nov 25 15:39:50 crc kubenswrapper[4800]: E1125 15:39:50.881394 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice/crio-126e343945a386573f7a985814f180bc379c483865c325a1d87fbca3fd91cda2\": RecentStats: unable to find data in memory cache]" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.149245 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7dfbf776bb-kgx2k" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.448290 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.458284 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.458494 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.466325 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-vqwgw"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.466697 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.469500 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.594073 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.594196 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.594255 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config-secret\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.594288 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg2cd\" (UniqueName: \"kubernetes.io/projected/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-kube-api-access-rg2cd\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.695992 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config-secret\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.696092 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2cd\" (UniqueName: \"kubernetes.io/projected/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-kube-api-access-rg2cd\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.696553 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.697126 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient"
(UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.697749 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.703740 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.712550 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 25 15:39:51 crc kubenswrapper[4800]: E1125 15:39:51.713544 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-rg2cd openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="f7215e48-b9aa-4315-b4c2-2b9a502d14ca" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.715272 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config-secret\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.739614 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg2cd\" (UniqueName: \"kubernetes.io/projected/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-kube-api-access-rg2cd\") pod \"openstackclient\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") " pod="openstack/openstackclient" Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.754824 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.810004 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.811535 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.840341 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.903166 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/504ea677-7f03-49bd-a420-ab472ab48709-combined-ca-bundle\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.903250 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/504ea677-7f03-49bd-a420-ab472ab48709-openstack-config\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.903277 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r9pw\" (UniqueName: \"kubernetes.io/projected/504ea677-7f03-49bd-a420-ab472ab48709-kube-api-access-2r9pw\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.903323 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/504ea677-7f03-49bd-a420-ab472ab48709-openstack-config-secret\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
Nov 25 15:39:51 crc kubenswrapper[4800]: I1125 15:39:51.921695 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6c9d4bc54d-drmz2"
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.003681 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6c9d4bc54d-drmz2"
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.004648 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/504ea677-7f03-49bd-a420-ab472ab48709-openstack-config-secret\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.004868 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/504ea677-7f03-49bd-a420-ab472ab48709-combined-ca-bundle\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.004929 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/504ea677-7f03-49bd-a420-ab472ab48709-openstack-config\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.004954 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r9pw\" (UniqueName: \"kubernetes.io/projected/504ea677-7f03-49bd-a420-ab472ab48709-kube-api-access-2r9pw\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient"
\"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.007874 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/504ea677-7f03-49bd-a420-ab472ab48709-openstack-config\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.014656 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/504ea677-7f03-49bd-a420-ab472ab48709-combined-ca-bundle\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.025220 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/504ea677-7f03-49bd-a420-ab472ab48709-openstack-config-secret\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.030580 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r9pw\" (UniqueName: \"kubernetes.io/projected/504ea677-7f03-49bd-a420-ab472ab48709-kube-api-access-2r9pw\") pod \"openstackclient\" (UID: \"504ea677-7f03-49bd-a420-ab472ab48709\") " pod="openstack/openstackclient" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.153229 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.494281 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.516758 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.522135 4800 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="f7215e48-b9aa-4315-b4c2-2b9a502d14ca" podUID="504ea677-7f03-49bd-a420-ab472ab48709"
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.617504 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config\") pod \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") "
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.617709 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-combined-ca-bundle\") pod \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") "
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.617853 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rg2cd\" (UniqueName: \"kubernetes.io/projected/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-kube-api-access-rg2cd\") pod \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") "
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.617916 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config-secret\") pod \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\" (UID: \"f7215e48-b9aa-4315-b4c2-2b9a502d14ca\") "
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.620579 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "f7215e48-b9aa-4315-b4c2-2b9a502d14ca" (UID: "f7215e48-b9aa-4315-b4c2-2b9a502d14ca"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.626995 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f7215e48-b9aa-4315-b4c2-2b9a502d14ca" (UID: "f7215e48-b9aa-4315-b4c2-2b9a502d14ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.631001 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "f7215e48-b9aa-4315-b4c2-2b9a502d14ca" (UID: "f7215e48-b9aa-4315-b4c2-2b9a502d14ca"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.651316 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-kube-api-access-rg2cd" (OuterVolumeSpecName: "kube-api-access-rg2cd") pod "f7215e48-b9aa-4315-b4c2-2b9a502d14ca" (UID: "f7215e48-b9aa-4315-b4c2-2b9a502d14ca"). InnerVolumeSpecName "kube-api-access-rg2cd". PluginName "kubernetes.io/projected", VolumeGidValue ""
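
In the entries above, openstackclient is deleted and immediately recreated under the same namespace/name but a new UID (f7215e48-… gives way to 504ea677-…), so the kubelet tears down the old instance's volumes while mounting the new one's, and the status manager skips status writes for the stale UID. The guard reduces to a key comparison, roughly (a sketch, not the kubelet's actual status_manager code):

    package main

    import "fmt"

    type podKey struct{ namespace, name, uid string }

    func main() {
        // UIDs copied from the status_manager entry above.
        old := podKey{"openstack", "openstackclient", "f7215e48-b9aa-4315-b4c2-2b9a502d14ca"}
        cur := podKey{"openstack", "openstackclient", "504ea677-7f03-49bd-a420-ab472ab48709"}
        if old.namespace == cur.namespace && old.name == cur.name && old.uid != cur.uid {
            fmt.Println("Pod was deleted and then recreated, skipping status update")
        }
    }
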
InnerVolumeSpecName "kube-api-access-rg2cd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.720665 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rg2cd\" (UniqueName: \"kubernetes.io/projected/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-kube-api-access-rg2cd\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.720713 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.720726 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.720738 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7215e48-b9aa-4315-b4c2-2b9a502d14ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.788236 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 15:39:52 crc kubenswrapper[4800]: I1125 15:39:52.955525 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 15:39:53 crc kubenswrapper[4800]: I1125 15:39:53.504224 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 15:39:53 crc kubenswrapper[4800]: I1125 15:39:53.506047 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"504ea677-7f03-49bd-a420-ab472ab48709","Type":"ContainerStarted","Data":"9baaff6abaf192b964df64b0098bd163881cde7733d48ac74b5f10a028863366"} Nov 25 15:39:53 crc kubenswrapper[4800]: I1125 15:39:53.521820 4800 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="f7215e48-b9aa-4315-b4c2-2b9a502d14ca" podUID="504ea677-7f03-49bd-a420-ab472ab48709" Nov 25 15:39:53 crc kubenswrapper[4800]: I1125 15:39:53.796764 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7215e48-b9aa-4315-b4c2-2b9a502d14ca" path="/var/lib/kubelet/pods/f7215e48-b9aa-4315-b4c2-2b9a502d14ca/volumes" Nov 25 15:39:54 crc kubenswrapper[4800]: I1125 15:39:54.503778 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:54 crc kubenswrapper[4800]: I1125 15:39:54.506815 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68d9dc6bf6-nhg96" Nov 25 15:39:54 crc kubenswrapper[4800]: I1125 15:39:54.622191 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-778948847d-68g7j"] Nov 25 15:39:54 crc kubenswrapper[4800]: I1125 15:39:54.623031 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-778948847d-68g7j" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api-log" containerID="cri-o://1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c" gracePeriod=30 Nov 25 15:39:54 crc kubenswrapper[4800]: I1125 15:39:54.623411 4800 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/barbican-api-778948847d-68g7j" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api" containerID="cri-o://bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9" gracePeriod=30 Nov 25 15:39:55 crc kubenswrapper[4800]: I1125 15:39:55.546230 4800 generic.go:334] "Generic (PLEG): container finished" podID="6855c149-842e-4bde-b262-447fb978ffa8" containerID="1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c" exitCode=143 Nov 25 15:39:55 crc kubenswrapper[4800]: I1125 15:39:55.547601 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-778948847d-68g7j" event={"ID":"6855c149-842e-4bde-b262-447fb978ffa8","Type":"ContainerDied","Data":"1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c"} Nov 25 15:39:57 crc kubenswrapper[4800]: I1125 15:39:57.517543 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 15:39:57 crc kubenswrapper[4800]: I1125 15:39:57.682062 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7c78ff894b-2g5wf" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.138:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.138:8443: connect: connection refused" Nov 25 15:39:57 crc kubenswrapper[4800]: I1125 15:39:57.848241 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-778948847d-68g7j" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.147:9311/healthcheck\": read tcp 10.217.0.2:48010->10.217.0.147:9311: read: connection reset by peer" Nov 25 15:39:57 crc kubenswrapper[4800]: I1125 15:39:57.848241 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-778948847d-68g7j" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.147:9311/healthcheck\": read tcp 10.217.0.2:48014->10.217.0.147:9311: read: connection reset by peer" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.137057 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.360019 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.502597 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6855c149-842e-4bde-b262-447fb978ffa8-logs\") pod \"6855c149-842e-4bde-b262-447fb978ffa8\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") "
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.502689 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data\") pod \"6855c149-842e-4bde-b262-447fb978ffa8\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") "
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.502856 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data-custom\") pod \"6855c149-842e-4bde-b262-447fb978ffa8\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") "
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.502911 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle\") pod \"6855c149-842e-4bde-b262-447fb978ffa8\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") "
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.502956 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cp25w\" (UniqueName: \"kubernetes.io/projected/6855c149-842e-4bde-b262-447fb978ffa8-kube-api-access-cp25w\") pod \"6855c149-842e-4bde-b262-447fb978ffa8\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") "
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.504159 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6855c149-842e-4bde-b262-447fb978ffa8-logs" (OuterVolumeSpecName: "logs") pod "6855c149-842e-4bde-b262-447fb978ffa8" (UID: "6855c149-842e-4bde-b262-447fb978ffa8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.504586 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6855c149-842e-4bde-b262-447fb978ffa8-logs\") on node \"crc\" DevicePath \"\""
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.533997 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6855c149-842e-4bde-b262-447fb978ffa8" (UID: "6855c149-842e-4bde-b262-447fb978ffa8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.534066 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6855c149-842e-4bde-b262-447fb978ffa8-kube-api-access-cp25w" (OuterVolumeSpecName: "kube-api-access-cp25w") pod "6855c149-842e-4bde-b262-447fb978ffa8" (UID: "6855c149-842e-4bde-b262-447fb978ffa8"). InnerVolumeSpecName "kube-api-access-cp25w". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.605498 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6855c149-842e-4bde-b262-447fb978ffa8" (UID: "6855c149-842e-4bde-b262-447fb978ffa8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.605780 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle\") pod \"6855c149-842e-4bde-b262-447fb978ffa8\" (UID: \"6855c149-842e-4bde-b262-447fb978ffa8\") " Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.606330 4800 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.606361 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cp25w\" (UniqueName: \"kubernetes.io/projected/6855c149-842e-4bde-b262-447fb978ffa8-kube-api-access-cp25w\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:58 crc kubenswrapper[4800]: W1125 15:39:58.606454 4800 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/6855c149-842e-4bde-b262-447fb978ffa8/volumes/kubernetes.io~secret/combined-ca-bundle Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.606471 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6855c149-842e-4bde-b262-447fb978ffa8" (UID: "6855c149-842e-4bde-b262-447fb978ffa8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.613555 4800 generic.go:334] "Generic (PLEG): container finished" podID="6855c149-842e-4bde-b262-447fb978ffa8" containerID="bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9" exitCode=0 Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.613611 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-778948847d-68g7j" event={"ID":"6855c149-842e-4bde-b262-447fb978ffa8","Type":"ContainerDied","Data":"bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9"} Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.613645 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-778948847d-68g7j" event={"ID":"6855c149-842e-4bde-b262-447fb978ffa8","Type":"ContainerDied","Data":"50f1ab51cb0d2451340e6512a358a03aa1266c273f4feaf77ee2ee82fd01cda5"} Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.613665 4800 scope.go:117] "RemoveContainer" containerID="bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.613892 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.633001 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data" (OuterVolumeSpecName: "config-data") pod "6855c149-842e-4bde-b262-447fb978ffa8" (UID: "6855c149-842e-4bde-b262-447fb978ffa8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.652665 4800 scope.go:117] "RemoveContainer" containerID="1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c"
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.680159 4800 scope.go:117] "RemoveContainer" containerID="bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9"
Nov 25 15:39:58 crc kubenswrapper[4800]: E1125 15:39:58.680640 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9\": container with ID starting with bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9 not found: ID does not exist" containerID="bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9"
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.680679 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9"} err="failed to get container status \"bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9\": rpc error: code = NotFound desc = could not find container \"bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9\": container with ID starting with bc1636994ba54dd26c3e3634fd3b1c141c86e65c5f954285ca8a66c6baf967f9 not found: ID does not exist"
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.680704 4800 scope.go:117] "RemoveContainer" containerID="1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c"
Nov 25 15:39:58 crc kubenswrapper[4800]: E1125 15:39:58.681087 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c\": container with ID starting with 1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c not found: ID does not exist" containerID="1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c"
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.681111 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c"} err="failed to get container status \"1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c\": rpc error: code = NotFound desc = could not find container \"1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c\": container with ID starting with 1e5108e3ff0a112ec4482646bdeef0b02c190a8b5f5051e0207ab71662daf17c not found: ID does not exist"
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.708323 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.708365 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data\") on node \"crc\" DevicePath \"\""
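
The RemoveContainer/ContainerStatus NotFound errors above are a benign race: by the time scope.go retries deletion, CRI-O has already removed the container, so the runtime answers with gRPC NotFound and the kubelet logs it and moves on. A sketch of that idempotent-delete pattern (removeContainer is a hypothetical stand-in for a CRI call; requires google.golang.org/grpc):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer stands in for a CRI RemoveContainer call racing a
    // runtime that has already deleted the container.
    func removeContainer(id string) error {
        return status.Error(codes.NotFound, "could not find container "+id)
    }

    func main() {
        err := removeContainer("bc1636994ba5")
        if status.Code(err) == codes.NotFound {
            fmt.Println("already removed, treating as success:", err)
            err = nil
        }
        if err != nil {
            panic(err)
        }
    }
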
\"config-data\" (UniqueName: \"kubernetes.io/secret/6855c149-842e-4bde-b262-447fb978ffa8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:39:58 crc kubenswrapper[4800]: I1125 15:39:58.998092 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-778948847d-68g7j"] Nov 25 15:39:59 crc kubenswrapper[4800]: I1125 15:39:59.008534 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-778948847d-68g7j"] Nov 25 15:39:59 crc kubenswrapper[4800]: I1125 15:39:59.797552 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6855c149-842e-4bde-b262-447fb978ffa8" path="/var/lib/kubelet/pods/6855c149-842e-4bde-b262-447fb978ffa8/volumes" Nov 25 15:40:01 crc kubenswrapper[4800]: E1125 15:40:01.169257 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice/crio-126e343945a386573f7a985814f180bc379c483865c325a1d87fbca3fd91cda2\": RecentStats: unable to find data in memory cache]" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.323535 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-m8npp"] Nov 25 15:40:01 crc kubenswrapper[4800]: E1125 15:40:01.326207 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api-log" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.326238 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api-log" Nov 25 15:40:01 crc kubenswrapper[4800]: E1125 15:40:01.326256 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.326266 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.326469 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api-log" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.326486 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6855c149-842e-4bde-b262-447fb978ffa8" containerName="barbican-api" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.327266 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m8npp" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.360291 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-m8npp"] Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.360721 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-487b-account-create-5gkwt"] Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.364985 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.367968 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.370941 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-487b-account-create-5gkwt"]
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.437717 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-hjmpn"]
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.439406 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hjmpn"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.448095 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-hjmpn"]
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.472595 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-operator-scripts\") pod \"nova-api-db-create-m8npp\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") " pod="openstack/nova-api-db-create-m8npp"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.472684 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a909488-3abb-437b-9136-0b5856ff1700-operator-scripts\") pod \"nova-api-487b-account-create-5gkwt\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") " pod="openstack/nova-api-487b-account-create-5gkwt"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.472872 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6vcb\" (UniqueName: \"kubernetes.io/projected/3a909488-3abb-437b-9136-0b5856ff1700-kube-api-access-c6vcb\") pod \"nova-api-487b-account-create-5gkwt\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") " pod="openstack/nova-api-487b-account-create-5gkwt"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.472898 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dcw2\" (UniqueName: \"kubernetes.io/projected/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-kube-api-access-8dcw2\") pod \"nova-api-db-create-m8npp\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") " pod="openstack/nova-api-db-create-m8npp"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.533238 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-ms2dt"]
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.536877 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ms2dt"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.550139 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-b636-account-create-6dm4h"]
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.553511 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b636-account-create-6dm4h"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.555582 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.565490 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-ms2dt"]
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.573398 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-b636-account-create-6dm4h"]
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.575998 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-operator-scripts\") pod \"nova-api-db-create-m8npp\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") " pod="openstack/nova-api-db-create-m8npp"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.576164 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a909488-3abb-437b-9136-0b5856ff1700-operator-scripts\") pod \"nova-api-487b-account-create-5gkwt\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") " pod="openstack/nova-api-487b-account-create-5gkwt"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.576391 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6vcb\" (UniqueName: \"kubernetes.io/projected/3a909488-3abb-437b-9136-0b5856ff1700-kube-api-access-c6vcb\") pod \"nova-api-487b-account-create-5gkwt\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") " pod="openstack/nova-api-487b-account-create-5gkwt"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.576444 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcw2\" (UniqueName: \"kubernetes.io/projected/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-kube-api-access-8dcw2\") pod \"nova-api-db-create-m8npp\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") " pod="openstack/nova-api-db-create-m8npp"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.576568 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knxb6\" (UniqueName: \"kubernetes.io/projected/03a21aaa-366d-48c9-bf08-3ea77b154123-kube-api-access-knxb6\") pod \"nova-cell0-db-create-hjmpn\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") " pod="openstack/nova-cell0-db-create-hjmpn"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.576681 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03a21aaa-366d-48c9-bf08-3ea77b154123-operator-scripts\") pod \"nova-cell0-db-create-hjmpn\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") " pod="openstack/nova-cell0-db-create-hjmpn"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.577009 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a909488-3abb-437b-9136-0b5856ff1700-operator-scripts\") pod \"nova-api-487b-account-create-5gkwt\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") " pod="openstack/nova-api-487b-account-create-5gkwt"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.578185 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-operator-scripts\") pod \"nova-api-db-create-m8npp\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") " pod="openstack/nova-api-db-create-m8npp"
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-operator-scripts\") pod \"nova-api-db-create-m8npp\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") " pod="openstack/nova-api-db-create-m8npp" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.602093 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6vcb\" (UniqueName: \"kubernetes.io/projected/3a909488-3abb-437b-9136-0b5856ff1700-kube-api-access-c6vcb\") pod \"nova-api-487b-account-create-5gkwt\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") " pod="openstack/nova-api-487b-account-create-5gkwt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.608691 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dcw2\" (UniqueName: \"kubernetes.io/projected/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-kube-api-access-8dcw2\") pod \"nova-api-db-create-m8npp\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") " pod="openstack/nova-api-db-create-m8npp" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.649159 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m8npp" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.678853 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1da2fa33-150f-4450-8504-ac3d17932a8e-operator-scripts\") pod \"nova-cell0-b636-account-create-6dm4h\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") " pod="openstack/nova-cell0-b636-account-create-6dm4h" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.678929 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-operator-scripts\") pod \"nova-cell1-db-create-ms2dt\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") " pod="openstack/nova-cell1-db-create-ms2dt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.679014 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncdrm\" (UniqueName: \"kubernetes.io/projected/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-kube-api-access-ncdrm\") pod \"nova-cell1-db-create-ms2dt\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") " pod="openstack/nova-cell1-db-create-ms2dt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.679037 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgvhh\" (UniqueName: \"kubernetes.io/projected/1da2fa33-150f-4450-8504-ac3d17932a8e-kube-api-access-dgvhh\") pod \"nova-cell0-b636-account-create-6dm4h\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") " pod="openstack/nova-cell0-b636-account-create-6dm4h" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.679146 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knxb6\" (UniqueName: \"kubernetes.io/projected/03a21aaa-366d-48c9-bf08-3ea77b154123-kube-api-access-knxb6\") pod \"nova-cell0-db-create-hjmpn\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") " pod="openstack/nova-cell0-db-create-hjmpn" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.679185 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/03a21aaa-366d-48c9-bf08-3ea77b154123-operator-scripts\") pod \"nova-cell0-db-create-hjmpn\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") " pod="openstack/nova-cell0-db-create-hjmpn" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.680382 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03a21aaa-366d-48c9-bf08-3ea77b154123-operator-scripts\") pod \"nova-cell0-db-create-hjmpn\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") " pod="openstack/nova-cell0-db-create-hjmpn" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.687662 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-487b-account-create-5gkwt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.698410 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knxb6\" (UniqueName: \"kubernetes.io/projected/03a21aaa-366d-48c9-bf08-3ea77b154123-kube-api-access-knxb6\") pod \"nova-cell0-db-create-hjmpn\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") " pod="openstack/nova-cell0-db-create-hjmpn" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.780584 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hjmpn" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.781292 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncdrm\" (UniqueName: \"kubernetes.io/projected/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-kube-api-access-ncdrm\") pod \"nova-cell1-db-create-ms2dt\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") " pod="openstack/nova-cell1-db-create-ms2dt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.781354 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgvhh\" (UniqueName: \"kubernetes.io/projected/1da2fa33-150f-4450-8504-ac3d17932a8e-kube-api-access-dgvhh\") pod \"nova-cell0-b636-account-create-6dm4h\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") " pod="openstack/nova-cell0-b636-account-create-6dm4h" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.781463 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1da2fa33-150f-4450-8504-ac3d17932a8e-operator-scripts\") pod \"nova-cell0-b636-account-create-6dm4h\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") " pod="openstack/nova-cell0-b636-account-create-6dm4h" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.781501 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-operator-scripts\") pod \"nova-cell1-db-create-ms2dt\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") " pod="openstack/nova-cell1-db-create-ms2dt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.782342 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-operator-scripts\") pod \"nova-cell1-db-create-ms2dt\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") " pod="openstack/nova-cell1-db-create-ms2dt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.783053 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/1da2fa33-150f-4450-8504-ac3d17932a8e-operator-scripts\") pod \"nova-cell0-b636-account-create-6dm4h\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") " pod="openstack/nova-cell0-b636-account-create-6dm4h" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.812543 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-7efa-account-create-7lrdr"] Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.815730 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncdrm\" (UniqueName: \"kubernetes.io/projected/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-kube-api-access-ncdrm\") pod \"nova-cell1-db-create-ms2dt\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") " pod="openstack/nova-cell1-db-create-ms2dt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.823856 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgvhh\" (UniqueName: \"kubernetes.io/projected/1da2fa33-150f-4450-8504-ac3d17932a8e-kube-api-access-dgvhh\") pod \"nova-cell0-b636-account-create-6dm4h\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") " pod="openstack/nova-cell0-b636-account-create-6dm4h" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.825125 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7efa-account-create-7lrdr"] Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.825247 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7efa-account-create-7lrdr" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.829581 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.852357 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ms2dt" Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.869690 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.889551 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/840d89d2-2842-4aa9-a9f5-4de3794dcb34-operator-scripts\") pod \"nova-cell1-7efa-account-create-7lrdr\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") " pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.889636 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4ch6\" (UniqueName: \"kubernetes.io/projected/840d89d2-2842-4aa9-a9f5-4de3794dcb34-kube-api-access-h4ch6\") pod \"nova-cell1-7efa-account-create-7lrdr\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") " pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.993060 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/840d89d2-2842-4aa9-a9f5-4de3794dcb34-operator-scripts\") pod \"nova-cell1-7efa-account-create-7lrdr\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") " pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.993408 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4ch6\" (UniqueName: \"kubernetes.io/projected/840d89d2-2842-4aa9-a9f5-4de3794dcb34-kube-api-access-h4ch6\") pod \"nova-cell1-7efa-account-create-7lrdr\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") " pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:01 crc kubenswrapper[4800]: I1125 15:40:01.994671 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/840d89d2-2842-4aa9-a9f5-4de3794dcb34-operator-scripts\") pod \"nova-cell1-7efa-account-create-7lrdr\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") " pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:02 crc kubenswrapper[4800]: I1125 15:40:02.013388 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4ch6\" (UniqueName: \"kubernetes.io/projected/840d89d2-2842-4aa9-a9f5-4de3794dcb34-kube-api-access-h4ch6\") pod \"nova-cell1-7efa-account-create-7lrdr\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") " pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:02 crc kubenswrapper[4800]: I1125 15:40:02.125490 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-968b7bdb8-ttgfk"
Nov 25 15:40:02 crc kubenswrapper[4800]: I1125 15:40:02.245017 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.522047 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.523038 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-central-agent" containerID="cri-o://382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404" gracePeriod=30
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.523108 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="proxy-httpd" containerID="cri-o://62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb" gracePeriod=30
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.523157 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="sg-core" containerID="cri-o://2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a" gracePeriod=30
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.523169 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-notification-agent" containerID="cri-o://3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb" gracePeriod=30
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.541460 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.795780 4800 generic.go:334] "Generic (PLEG): container finished" podID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerID="2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a" exitCode=2
Nov 25 15:40:05 crc kubenswrapper[4800]: I1125 15:40:05.835401 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerDied","Data":"2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a"}
Nov 25 15:40:06 crc kubenswrapper[4800]: W1125 15:40:06.199259 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fa7d7c4_8163_4172_9ce7_96aae8e0e627.slice/crio-0a6eb618c3171bf724775c20e542864988611d5e0a8f60cb688e1a32e47f1894 WatchSource:0}: Error finding container 0a6eb618c3171bf724775c20e542864988611d5e0a8f60cb688e1a32e47f1894: Status 404 returned error can't find the container with id 0a6eb618c3171bf724775c20e542864988611d5e0a8f60cb688e1a32e47f1894
Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.204274 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-m8npp"]
Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.386369 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-487b-account-create-5gkwt"]
Nov 25 15:40:06 crc kubenswrapper[4800]: W1125 15:40:06.388370 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a909488_3abb_437b_9136_0b5856ff1700.slice/crio-50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b WatchSource:0}: Error finding container 50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b: Status 404 returned error can't find the container with id 50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b
container 50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b: Status 404 returned error can't find the container with id 50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.401613 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7efa-account-create-7lrdr"] Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.589647 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-hjmpn"] Nov 25 15:40:06 crc kubenswrapper[4800]: W1125 15:40:06.610158 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03a21aaa_366d_48c9_bf08_3ea77b154123.slice/crio-29a1e04178140daa113557af047a62eac0c792ad34304ebe492dba701b3af1f7 WatchSource:0}: Error finding container 29a1e04178140daa113557af047a62eac0c792ad34304ebe492dba701b3af1f7: Status 404 returned error can't find the container with id 29a1e04178140daa113557af047a62eac0c792ad34304ebe492dba701b3af1f7 Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.610183 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-ms2dt"] Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.622055 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-b636-account-create-6dm4h"] Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.810703 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-hjmpn" event={"ID":"03a21aaa-366d-48c9-bf08-3ea77b154123","Type":"ContainerStarted","Data":"29a1e04178140daa113557af047a62eac0c792ad34304ebe492dba701b3af1f7"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.812637 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-487b-account-create-5gkwt" event={"ID":"3a909488-3abb-437b-9136-0b5856ff1700","Type":"ContainerStarted","Data":"734e4da61e6068b40675801ff6b71f2ef520eabc8b67b51099a26a328990b9a2"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.812678 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-487b-account-create-5gkwt" event={"ID":"3a909488-3abb-437b-9136-0b5856ff1700","Type":"ContainerStarted","Data":"50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.821767 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"504ea677-7f03-49bd-a420-ab472ab48709","Type":"ContainerStarted","Data":"60a9695393821e4e86983ba09dc7c7dc9ca88cf2b2ba71f47f7d1ef1aeea980f"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.837014 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ms2dt" event={"ID":"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e","Type":"ContainerStarted","Data":"bc5300016e16576255121d197068dc3c489121bf9404febb5a0366f05f49cd7c"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.840619 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-487b-account-create-5gkwt" podStartSLOduration=5.84059396 podStartE2EDuration="5.84059396s" podCreationTimestamp="2025-11-25 15:40:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:40:06.83196134 +0000 UTC m=+1367.886369832" watchObservedRunningTime="2025-11-25 15:40:06.84059396 +0000 UTC m=+1367.895002432" Nov 
25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.847160 4800 generic.go:334] "Generic (PLEG): container finished" podID="1fa7d7c4-8163-4172-9ce7-96aae8e0e627" containerID="dd8d93fa2221b4532728f6487b5417138c9468699b9ba960e2ede7e905bbaffc" exitCode=0 Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.847263 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-m8npp" event={"ID":"1fa7d7c4-8163-4172-9ce7-96aae8e0e627","Type":"ContainerDied","Data":"dd8d93fa2221b4532728f6487b5417138c9468699b9ba960e2ede7e905bbaffc"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.847307 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-m8npp" event={"ID":"1fa7d7c4-8163-4172-9ce7-96aae8e0e627","Type":"ContainerStarted","Data":"0a6eb618c3171bf724775c20e542864988611d5e0a8f60cb688e1a32e47f1894"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.850045 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7efa-account-create-7lrdr" event={"ID":"840d89d2-2842-4aa9-a9f5-4de3794dcb34","Type":"ContainerStarted","Data":"5d05629313196fe1ac37421302bc0bdd3b37f0f75d2e1e3e54c85319586ff517"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.850076 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7efa-account-create-7lrdr" event={"ID":"840d89d2-2842-4aa9-a9f5-4de3794dcb34","Type":"ContainerStarted","Data":"3659058e28fe69614b30ee206677ed9a38968e5c3f9e6925546e843bfb9be2e5"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.859152 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.067955442 podStartE2EDuration="15.859126746s" podCreationTimestamp="2025-11-25 15:39:51 +0000 UTC" firstStartedPulling="2025-11-25 15:39:52.96854319 +0000 UTC m=+1354.022951672" lastFinishedPulling="2025-11-25 15:40:05.759714494 +0000 UTC m=+1366.814122976" observedRunningTime="2025-11-25 15:40:06.849492528 +0000 UTC m=+1367.903901020" watchObservedRunningTime="2025-11-25 15:40:06.859126746 +0000 UTC m=+1367.913535228" Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.861003 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b636-account-create-6dm4h" event={"ID":"1da2fa33-150f-4450-8504-ac3d17932a8e","Type":"ContainerStarted","Data":"6759ed01883a1a2efdbde13a066517858e3ec3d537cc93d216c53430877f8a9b"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.864201 4800 generic.go:334] "Generic (PLEG): container finished" podID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerID="62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb" exitCode=0 Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.864224 4800 generic.go:334] "Generic (PLEG): container finished" podID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerID="382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404" exitCode=0 Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.864244 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerDied","Data":"62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.864260 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerDied","Data":"382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404"} Nov 25 15:40:06 crc kubenswrapper[4800]: I1125 15:40:06.895825 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-7efa-account-create-7lrdr" podStartSLOduration=5.895746346 podStartE2EDuration="5.895746346s" podCreationTimestamp="2025-11-25 15:40:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:40:06.890165911 +0000 UTC m=+1367.944574393" watchObservedRunningTime="2025-11-25 15:40:06.895746346 +0000 UTC m=+1367.950154828" Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.394022 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.444164 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-config-data\") pod \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.444253 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-run-httpd\") pod \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.444300 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-scripts\") pod \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.444403 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-log-httpd\") pod \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.444473 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-combined-ca-bundle\") pod \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.444559 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhlkj\" (UniqueName: \"kubernetes.io/projected/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-kube-api-access-fhlkj\") pod \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.444627 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-sg-core-conf-yaml\") pod \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\" (UID: \"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce\") " Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.445590 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" (UID: "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.445773 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" (UID: "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.464875 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-scripts" (OuterVolumeSpecName: "scripts") pod "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" (UID: "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.465084 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-kube-api-access-fhlkj" (OuterVolumeSpecName: "kube-api-access-fhlkj") pod "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" (UID: "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce"). InnerVolumeSpecName "kube-api-access-fhlkj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.491366 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" (UID: "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.545007 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" (UID: "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce"). InnerVolumeSpecName "combined-ca-bundle". 
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.547769 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.547817 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.547832 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.547866 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.547885 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhlkj\" (UniqueName: \"kubernetes.io/projected/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-kube-api-access-fhlkj\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.547901 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.627754 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-config-data" (OuterVolumeSpecName: "config-data") pod "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" (UID: "b003d801-e6f2-4fe9-b9a2-428f6d0f19ce"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.652544 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.681498 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7c78ff894b-2g5wf" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.138:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.138:8443: connect: connection refused"
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.681636 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7c78ff894b-2g5wf"
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.876304 4800 generic.go:334] "Generic (PLEG): container finished" podID="840d89d2-2842-4aa9-a9f5-4de3794dcb34" containerID="5d05629313196fe1ac37421302bc0bdd3b37f0f75d2e1e3e54c85319586ff517" exitCode=0
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.876387 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7efa-account-create-7lrdr" event={"ID":"840d89d2-2842-4aa9-a9f5-4de3794dcb34","Type":"ContainerDied","Data":"5d05629313196fe1ac37421302bc0bdd3b37f0f75d2e1e3e54c85319586ff517"}
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.877884 4800 generic.go:334] "Generic (PLEG): container finished" podID="1da2fa33-150f-4450-8504-ac3d17932a8e" containerID="97d497703c712ffa8c149578238827a3a982c16d73a1a92f4e4951ef8285773b" exitCode=0
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.877928 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b636-account-create-6dm4h" event={"ID":"1da2fa33-150f-4450-8504-ac3d17932a8e","Type":"ContainerDied","Data":"97d497703c712ffa8c149578238827a3a982c16d73a1a92f4e4951ef8285773b"}
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.880768 4800 generic.go:334] "Generic (PLEG): container finished" podID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerID="3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb" exitCode=0
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.880837 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerDied","Data":"3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb"}
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.880883 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b003d801-e6f2-4fe9-b9a2-428f6d0f19ce","Type":"ContainerDied","Data":"a415deaa991234ffb292998650d9acbedd8ee7051d6d9a4588206ecf585589c2"}
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.880908 4800 scope.go:117] "RemoveContainer" containerID="62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb"
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.880914 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.884053 4800 generic.go:334] "Generic (PLEG): container finished" podID="03a21aaa-366d-48c9-bf08-3ea77b154123" containerID="84bf5dd202b09f7cb71ce72a81067f398fd41c26ccaab69843c093b9c637edd8" exitCode=0
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.884102 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-hjmpn" event={"ID":"03a21aaa-366d-48c9-bf08-3ea77b154123","Type":"ContainerDied","Data":"84bf5dd202b09f7cb71ce72a81067f398fd41c26ccaab69843c093b9c637edd8"}
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.893502 4800 generic.go:334] "Generic (PLEG): container finished" podID="3a909488-3abb-437b-9136-0b5856ff1700" containerID="734e4da61e6068b40675801ff6b71f2ef520eabc8b67b51099a26a328990b9a2" exitCode=0
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.893675 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-487b-account-create-5gkwt" event={"ID":"3a909488-3abb-437b-9136-0b5856ff1700","Type":"ContainerDied","Data":"734e4da61e6068b40675801ff6b71f2ef520eabc8b67b51099a26a328990b9a2"}
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.903549 4800 generic.go:334] "Generic (PLEG): container finished" podID="1b6cb1a4-848f-4af5-bd9a-563d2ccd630e" containerID="ae142a7058f992b3199aa5d54d3f5edc41955c1015e086f975ca298de036731a" exitCode=0
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.903891 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ms2dt" event={"ID":"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e","Type":"ContainerDied","Data":"ae142a7058f992b3199aa5d54d3f5edc41955c1015e086f975ca298de036731a"}
Nov 25 15:40:07 crc kubenswrapper[4800]: I1125 15:40:07.923049 4800 scope.go:117] "RemoveContainer" containerID="2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.022458 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.040456 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.082082 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.082653 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="proxy-httpd"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.082680 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="proxy-httpd"
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.082694 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-central-agent"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.082704 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-central-agent"
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.082735 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-notification-agent"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.082745 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-notification-agent"
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.082770 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="sg-core"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.082779 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="sg-core"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.083033 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="sg-core"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.083056 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="proxy-httpd"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.083064 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-notification-agent"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.083075 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" containerName="ceilometer-central-agent"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.089316 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.099324 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.099520 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.102074 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.129820 4800 scope.go:117] "RemoveContainer" containerID="3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.175210 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-scripts\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.175301 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-config-data\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.175349 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.175368 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-log-httpd\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.175424 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-run-httpd\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.175474 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlhh9\" (UniqueName: \"kubernetes.io/projected/c693762c-f8a3-478f-94af-a37403c0243c-kube-api-access-rlhh9\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.175505 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.217693 4800 scope.go:117] "RemoveContainer" containerID="382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.273260 4800 scope.go:117] "RemoveContainer" containerID="62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278072 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-scripts\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278136 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-config-data\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278177 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278202 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-log-httpd\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278232 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-run-httpd\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278277 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlhh9\" (UniqueName: \"kubernetes.io/projected/c693762c-f8a3-478f-94af-a37403c0243c-kube-api-access-rlhh9\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278310 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.278358 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb\": container with ID starting with 62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb not found: ID does not exist" containerID="62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278412 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb"} err="failed to get container status \"62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb\": rpc error: code = NotFound desc = could not find container \"62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb\": container with ID starting with 62e17deff6a476e72205dedc0fc51da75d8adada5a45561fa2bb610bcb57fefb not found: ID does not exist"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.278446 4800 scope.go:117] "RemoveContainer" containerID="2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a"
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.285221 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a\": container with ID starting with 2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a not found: ID does not exist" containerID="2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.285276 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a"} err="failed to get container status \"2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a\": rpc error: code = NotFound desc = could not find container \"2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a\": container with ID starting with 2c2ffbc2c17d1167d67cab09d78e88d87a0e1a1ec2f79a8dc20a8daeae414f2a not found: ID does not exist"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.285310 4800 scope.go:117] "RemoveContainer" containerID="3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.286458 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.287353 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.287540 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb\": container with ID starting with 3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb not found: ID does not exist" containerID="3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.287570 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb"} err="failed to get container status \"3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb\": rpc error: code = NotFound desc = could not find container \"3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb\": container with ID starting with 3b564d1824a378c5903475ff661a8bc6baf9156cc4177a6c40d0f401be30dacb not found: ID does not exist"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.287597 4800 scope.go:117] "RemoveContainer" containerID="382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404"
Nov 25 15:40:08 crc kubenswrapper[4800]: E1125 15:40:08.288455 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404\": container with ID starting with 382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404 not found: ID does not exist" containerID="382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.288474 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404"} err="failed to get container status \"382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404\": rpc error: code = NotFound desc = could not find container \"382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404\": container with ID starting with 382c55d1a9c8886333ca45c4f45447ce31e3b6b54e4aa730ee3ffc1352b13404 not found: ID does not exist"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.292710 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-97869bf49-tc9dz"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.293721 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-scripts\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.294620 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-log-httpd\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.294932 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-run-httpd\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.304093 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-config-data\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.337770 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlhh9\" (UniqueName: \"kubernetes.io/projected/c693762c-f8a3-478f-94af-a37403c0243c-kube-api-access-rlhh9\") pod \"ceilometer-0\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.390871 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-968b7bdb8-ttgfk"]
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.392259 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-968b7bdb8-ttgfk" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-api" containerID="cri-o://d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6" gracePeriod=30
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.393119 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-968b7bdb8-ttgfk" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-httpd" containerID="cri-o://2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447" gracePeriod=30
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.417339 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.463085 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m8npp"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.599146 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-operator-scripts\") pod \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") "
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.599292 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dcw2\" (UniqueName: \"kubernetes.io/projected/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-kube-api-access-8dcw2\") pod \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\" (UID: \"1fa7d7c4-8163-4172-9ce7-96aae8e0e627\") "
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.599690 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1fa7d7c4-8163-4172-9ce7-96aae8e0e627" (UID: "1fa7d7c4-8163-4172-9ce7-96aae8e0e627"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.600138 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.603705 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-kube-api-access-8dcw2" (OuterVolumeSpecName: "kube-api-access-8dcw2") pod "1fa7d7c4-8163-4172-9ce7-96aae8e0e627" (UID: "1fa7d7c4-8163-4172-9ce7-96aae8e0e627"). InnerVolumeSpecName "kube-api-access-8dcw2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.702335 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dcw2\" (UniqueName: \"kubernetes.io/projected/1fa7d7c4-8163-4172-9ce7-96aae8e0e627-kube-api-access-8dcw2\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.931960 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-m8npp" event={"ID":"1fa7d7c4-8163-4172-9ce7-96aae8e0e627","Type":"ContainerDied","Data":"0a6eb618c3171bf724775c20e542864988611d5e0a8f60cb688e1a32e47f1894"}
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.932006 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a6eb618c3171bf724775c20e542864988611d5e0a8f60cb688e1a32e47f1894"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.932006 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m8npp"
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.937265 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerID="2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447" exitCode=0
Nov 25 15:40:08 crc kubenswrapper[4800]: I1125 15:40:08.937320 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-968b7bdb8-ttgfk" event={"ID":"f4aa678c-d6e5-4dac-8e75-87a5a190badb","Type":"ContainerDied","Data":"2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447"}
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.044522 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:09 crc kubenswrapper[4800]: W1125 15:40:09.054704 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc693762c_f8a3_478f_94af_a37403c0243c.slice/crio-78f07d3c0f82e84654add5a91b1f05625ffdfcd43ce4a517906ed5baf9cdd8b5 WatchSource:0}: Error finding container 78f07d3c0f82e84654add5a91b1f05625ffdfcd43ce4a517906ed5baf9cdd8b5: Status 404 returned error can't find the container with id 78f07d3c0f82e84654add5a91b1f05625ffdfcd43ce4a517906ed5baf9cdd8b5
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.343769 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-487b-account-create-5gkwt"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.425406 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a909488-3abb-437b-9136-0b5856ff1700-operator-scripts\") pod \"3a909488-3abb-437b-9136-0b5856ff1700\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.425488 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6vcb\" (UniqueName: \"kubernetes.io/projected/3a909488-3abb-437b-9136-0b5856ff1700-kube-api-access-c6vcb\") pod \"3a909488-3abb-437b-9136-0b5856ff1700\" (UID: \"3a909488-3abb-437b-9136-0b5856ff1700\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.427276 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a909488-3abb-437b-9136-0b5856ff1700-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3a909488-3abb-437b-9136-0b5856ff1700" (UID: "3a909488-3abb-437b-9136-0b5856ff1700"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.435048 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a909488-3abb-437b-9136-0b5856ff1700-kube-api-access-c6vcb" (OuterVolumeSpecName: "kube-api-access-c6vcb") pod "3a909488-3abb-437b-9136-0b5856ff1700" (UID: "3a909488-3abb-437b-9136-0b5856ff1700"). InnerVolumeSpecName "kube-api-access-c6vcb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.529226 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a909488-3abb-437b-9136-0b5856ff1700-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.529258 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6vcb\" (UniqueName: \"kubernetes.io/projected/3a909488-3abb-437b-9136-0b5856ff1700-kube-api-access-c6vcb\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.664785 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b636-account-create-6dm4h"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.728549 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.757392 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hjmpn"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.762081 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ms2dt"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.806485 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b003d801-e6f2-4fe9-b9a2-428f6d0f19ce" path="/var/lib/kubelet/pods/b003d801-e6f2-4fe9-b9a2-428f6d0f19ce/volumes"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.834342 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4ch6\" (UniqueName: \"kubernetes.io/projected/840d89d2-2842-4aa9-a9f5-4de3794dcb34-kube-api-access-h4ch6\") pod \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.834530 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03a21aaa-366d-48c9-bf08-3ea77b154123-operator-scripts\") pod \"03a21aaa-366d-48c9-bf08-3ea77b154123\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.834623 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1da2fa33-150f-4450-8504-ac3d17932a8e-operator-scripts\") pod \"1da2fa33-150f-4450-8504-ac3d17932a8e\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.834697 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/840d89d2-2842-4aa9-a9f5-4de3794dcb34-operator-scripts\") pod \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\" (UID: \"840d89d2-2842-4aa9-a9f5-4de3794dcb34\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.834789 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knxb6\" (UniqueName: \"kubernetes.io/projected/03a21aaa-366d-48c9-bf08-3ea77b154123-kube-api-access-knxb6\") pod \"03a21aaa-366d-48c9-bf08-3ea77b154123\" (UID: \"03a21aaa-366d-48c9-bf08-3ea77b154123\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.834851 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgvhh\" (UniqueName: \"kubernetes.io/projected/1da2fa33-150f-4450-8504-ac3d17932a8e-kube-api-access-dgvhh\") pod \"1da2fa33-150f-4450-8504-ac3d17932a8e\" (UID: \"1da2fa33-150f-4450-8504-ac3d17932a8e\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.835529 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/840d89d2-2842-4aa9-a9f5-4de3794dcb34-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "840d89d2-2842-4aa9-a9f5-4de3794dcb34" (UID: "840d89d2-2842-4aa9-a9f5-4de3794dcb34"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.835993 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1da2fa33-150f-4450-8504-ac3d17932a8e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1da2fa33-150f-4450-8504-ac3d17932a8e" (UID: "1da2fa33-150f-4450-8504-ac3d17932a8e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.842372 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03a21aaa-366d-48c9-bf08-3ea77b154123-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "03a21aaa-366d-48c9-bf08-3ea77b154123" (UID: "03a21aaa-366d-48c9-bf08-3ea77b154123"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.848317 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1da2fa33-150f-4450-8504-ac3d17932a8e-kube-api-access-dgvhh" (OuterVolumeSpecName: "kube-api-access-dgvhh") pod "1da2fa33-150f-4450-8504-ac3d17932a8e" (UID: "1da2fa33-150f-4450-8504-ac3d17932a8e"). InnerVolumeSpecName "kube-api-access-dgvhh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.848879 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03a21aaa-366d-48c9-bf08-3ea77b154123-kube-api-access-knxb6" (OuterVolumeSpecName: "kube-api-access-knxb6") pod "03a21aaa-366d-48c9-bf08-3ea77b154123" (UID: "03a21aaa-366d-48c9-bf08-3ea77b154123"). InnerVolumeSpecName "kube-api-access-knxb6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.849983 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/840d89d2-2842-4aa9-a9f5-4de3794dcb34-kube-api-access-h4ch6" (OuterVolumeSpecName: "kube-api-access-h4ch6") pod "840d89d2-2842-4aa9-a9f5-4de3794dcb34" (UID: "840d89d2-2842-4aa9-a9f5-4de3794dcb34"). InnerVolumeSpecName "kube-api-access-h4ch6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.936923 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-operator-scripts\") pod \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.937143 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncdrm\" (UniqueName: \"kubernetes.io/projected/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-kube-api-access-ncdrm\") pod \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\" (UID: \"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e\") "
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.937746 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1b6cb1a4-848f-4af5-bd9a-563d2ccd630e" (UID: "1b6cb1a4-848f-4af5-bd9a-563d2ccd630e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.938437 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03a21aaa-366d-48c9-bf08-3ea77b154123-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.938477 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1da2fa33-150f-4450-8504-ac3d17932a8e-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.938492 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/840d89d2-2842-4aa9-a9f5-4de3794dcb34-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.938508 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knxb6\" (UniqueName: \"kubernetes.io/projected/03a21aaa-366d-48c9-bf08-3ea77b154123-kube-api-access-knxb6\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.938526 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgvhh\" (UniqueName: \"kubernetes.io/projected/1da2fa33-150f-4450-8504-ac3d17932a8e-kube-api-access-dgvhh\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.938541 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4ch6\" (UniqueName: \"kubernetes.io/projected/840d89d2-2842-4aa9-a9f5-4de3794dcb34-kube-api-access-h4ch6\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.938552 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.942374 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-kube-api-access-ncdrm" (OuterVolumeSpecName: "kube-api-access-ncdrm") pod "1b6cb1a4-848f-4af5-bd9a-563d2ccd630e" (UID: "1b6cb1a4-848f-4af5-bd9a-563d2ccd630e"). InnerVolumeSpecName "kube-api-access-ncdrm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.949176 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ms2dt"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.948997 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ms2dt" event={"ID":"1b6cb1a4-848f-4af5-bd9a-563d2ccd630e","Type":"ContainerDied","Data":"bc5300016e16576255121d197068dc3c489121bf9404febb5a0366f05f49cd7c"}
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.949432 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc5300016e16576255121d197068dc3c489121bf9404febb5a0366f05f49cd7c"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.951735 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7efa-account-create-7lrdr" event={"ID":"840d89d2-2842-4aa9-a9f5-4de3794dcb34","Type":"ContainerDied","Data":"3659058e28fe69614b30ee206677ed9a38968e5c3f9e6925546e843bfb9be2e5"}
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.951769 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3659058e28fe69614b30ee206677ed9a38968e5c3f9e6925546e843bfb9be2e5"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.951833 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7efa-account-create-7lrdr"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.960482 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b636-account-create-6dm4h"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.960673 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b636-account-create-6dm4h" event={"ID":"1da2fa33-150f-4450-8504-ac3d17932a8e","Type":"ContainerDied","Data":"6759ed01883a1a2efdbde13a066517858e3ec3d537cc93d216c53430877f8a9b"}
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.960836 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6759ed01883a1a2efdbde13a066517858e3ec3d537cc93d216c53430877f8a9b"
Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.965918 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hjmpn"
Need to start a new one" pod="openstack/nova-cell0-db-create-hjmpn" Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.965911 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-hjmpn" event={"ID":"03a21aaa-366d-48c9-bf08-3ea77b154123","Type":"ContainerDied","Data":"29a1e04178140daa113557af047a62eac0c792ad34304ebe492dba701b3af1f7"} Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.967250 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29a1e04178140daa113557af047a62eac0c792ad34304ebe492dba701b3af1f7" Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.969433 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerStarted","Data":"bbfd7d89199e3d9946f9c3923f15404aba7242af32a00e74b20d23a527bb95c2"} Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.969610 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerStarted","Data":"78f07d3c0f82e84654add5a91b1f05625ffdfcd43ce4a517906ed5baf9cdd8b5"} Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.977621 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-487b-account-create-5gkwt" event={"ID":"3a909488-3abb-437b-9136-0b5856ff1700","Type":"ContainerDied","Data":"50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b"} Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.977674 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50561e29f783bc51b79f52cfac3ae38c7cb7e7e6bd40035e85cd946caf94cd1b" Nov 25 15:40:09 crc kubenswrapper[4800]: I1125 15:40:09.977766 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-487b-account-create-5gkwt" Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.039968 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncdrm\" (UniqueName: \"kubernetes.io/projected/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e-kube-api-access-ncdrm\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.838621 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.838621 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-968b7bdb8-ttgfk"
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.962016 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-config\") pod \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") "
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.962095 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-combined-ca-bundle\") pod \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") "
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.962240 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-ovndb-tls-certs\") pod \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") "
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.962295 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f86m6\" (UniqueName: \"kubernetes.io/projected/f4aa678c-d6e5-4dac-8e75-87a5a190badb-kube-api-access-f86m6\") pod \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") "
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.962342 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-httpd-config\") pod \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\" (UID: \"f4aa678c-d6e5-4dac-8e75-87a5a190badb\") "
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.981163 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4aa678c-d6e5-4dac-8e75-87a5a190badb-kube-api-access-f86m6" (OuterVolumeSpecName: "kube-api-access-f86m6") pod "f4aa678c-d6e5-4dac-8e75-87a5a190badb" (UID: "f4aa678c-d6e5-4dac-8e75-87a5a190badb"). InnerVolumeSpecName "kube-api-access-f86m6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:10 crc kubenswrapper[4800]: I1125 15:40:10.995990 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "f4aa678c-d6e5-4dac-8e75-87a5a190badb" (UID: "f4aa678c-d6e5-4dac-8e75-87a5a190badb"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.016230 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerStarted","Data":"89fc25306fd5245b65f2aa1c7928c143aba87ac9bc49d617c93c504774397742"}
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.028163 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerID="d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6" exitCode=0
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.028207 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-968b7bdb8-ttgfk" event={"ID":"f4aa678c-d6e5-4dac-8e75-87a5a190badb","Type":"ContainerDied","Data":"d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6"}
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.028238 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-968b7bdb8-ttgfk" event={"ID":"f4aa678c-d6e5-4dac-8e75-87a5a190badb","Type":"ContainerDied","Data":"52fe29b64bbf31f0cb55549ca701b07c8aa09cd1e196526f80aaa441431da3aa"}
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.028255 4800 scope.go:117] "RemoveContainer" containerID="2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.028399 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-968b7bdb8-ttgfk"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.039189 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-config" (OuterVolumeSpecName: "config") pod "f4aa678c-d6e5-4dac-8e75-87a5a190badb" (UID: "f4aa678c-d6e5-4dac-8e75-87a5a190badb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.058729 4800 scope.go:117] "RemoveContainer" containerID="d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.069204 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f86m6\" (UniqueName: \"kubernetes.io/projected/f4aa678c-d6e5-4dac-8e75-87a5a190badb-kube-api-access-f86m6\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.069258 4800 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.069271 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-config\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.099037 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "f4aa678c-d6e5-4dac-8e75-87a5a190badb" (UID: "f4aa678c-d6e5-4dac-8e75-87a5a190badb"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.105194 4800 scope.go:117] "RemoveContainer" containerID="2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447"
Nov 25 15:40:11 crc kubenswrapper[4800]: E1125 15:40:11.105981 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447\": container with ID starting with 2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447 not found: ID does not exist" containerID="2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.106015 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447"} err="failed to get container status \"2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447\": rpc error: code = NotFound desc = could not find container \"2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447\": container with ID starting with 2ec10e02d6d8879e7b993c368c8dda713963709232b76bc475e89c1864197447 not found: ID does not exist"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.106036 4800 scope.go:117] "RemoveContainer" containerID="d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.109320 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4aa678c-d6e5-4dac-8e75-87a5a190badb" (UID: "f4aa678c-d6e5-4dac-8e75-87a5a190badb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:11 crc kubenswrapper[4800]: E1125 15:40:11.109511 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6\": container with ID starting with d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6 not found: ID does not exist" containerID="d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.109539 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6"} err="failed to get container status \"d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6\": rpc error: code = NotFound desc = could not find container \"d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6\": container with ID starting with d24d3ef285d2e8bfd5bb4873c46f510e273fb0fe0852d9099fc4f208321645b6 not found: ID does not exist"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.171643 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.171674 4800 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4aa678c-d6e5-4dac-8e75-87a5a190badb-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.373689 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-968b7bdb8-ttgfk"]
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.385911 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-968b7bdb8-ttgfk"]
Nov 25 15:40:11 crc kubenswrapper[4800]: E1125 15:40:11.467170 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice/crio-126e343945a386573f7a985814f180bc379c483865c325a1d87fbca3fd91cda2\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e726809_c215_4d1a_95a3_d0fadede3cca.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4aa678c_d6e5_4dac_8e75_87a5a190badb.slice/crio-52fe29b64bbf31f0cb55549ca701b07c8aa09cd1e196526f80aaa441431da3aa\": RecentStats: unable to find data in memory cache]"
Nov 25 15:40:11 crc kubenswrapper[4800]: I1125 15:40:11.803992 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" path="/var/lib/kubelet/pods/f4aa678c-d6e5-4dac-8e75-87a5a190badb/volumes"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.044549 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerStarted","Data":"f2a6338a91f06db47ff6a63f84146f4edd17833d9cd54758ac95664c874c115e"}
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055288 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5nblp"]
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055717 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b6cb1a4-848f-4af5-bd9a-563d2ccd630e" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055737 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b6cb1a4-848f-4af5-bd9a-563d2ccd630e" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055751 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1da2fa33-150f-4450-8504-ac3d17932a8e" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055759 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1da2fa33-150f-4450-8504-ac3d17932a8e" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055781 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-api"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055788 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-api"
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055801 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fa7d7c4-8163-4172-9ce7-96aae8e0e627" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055808 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fa7d7c4-8163-4172-9ce7-96aae8e0e627" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055828 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a909488-3abb-437b-9136-0b5856ff1700" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055835 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a909488-3abb-437b-9136-0b5856ff1700" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055859 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-httpd"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055865 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-httpd"
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055874 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03a21aaa-366d-48c9-bf08-3ea77b154123" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055880 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="03a21aaa-366d-48c9-bf08-3ea77b154123" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: E1125 15:40:12.055887 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="840d89d2-2842-4aa9-a9f5-4de3794dcb34" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.055893 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="840d89d2-2842-4aa9-a9f5-4de3794dcb34" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056119 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a909488-3abb-437b-9136-0b5856ff1700" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056136 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="840d89d2-2842-4aa9-a9f5-4de3794dcb34" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056143 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="03a21aaa-366d-48c9-bf08-3ea77b154123" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056154 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-api"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056169 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4aa678c-d6e5-4dac-8e75-87a5a190badb" containerName="neutron-httpd"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056176 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1da2fa33-150f-4450-8504-ac3d17932a8e" containerName="mariadb-account-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056190 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fa7d7c4-8163-4172-9ce7-96aae8e0e627" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056201 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b6cb1a4-848f-4af5-bd9a-563d2ccd630e" containerName="mariadb-database-create"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.056823 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.062044 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.062405 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.062400 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-w9n88"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.084066 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5nblp"]
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.190643 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-scripts\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.190967 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9xr9\" (UniqueName: \"kubernetes.io/projected/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-kube-api-access-k9xr9\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.191438 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-config-data\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.191703 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.293422 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9xr9\" (UniqueName: \"kubernetes.io/projected/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-kube-api-access-k9xr9\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.293510 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-config-data\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.293608 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.293652 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-scripts\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.298932 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.299015 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-config-data\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.299061 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-scripts\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.315436 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9xr9\" (UniqueName: \"kubernetes.io/projected/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-kube-api-access-k9xr9\") pod \"nova-cell0-conductor-db-sync-5nblp\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") " pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.376309 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:12 crc kubenswrapper[4800]: I1125 15:40:12.865973 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5nblp"]
Nov 25 15:40:12 crc kubenswrapper[4800]: W1125 15:40:12.880149 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8621b3fe_1d3c_45a9_897b_5cd1eb3f5a07.slice/crio-20c205ab7d57b5914d3bf0d34156fcea8eb707633beae8e55de674183ae22acb WatchSource:0}: Error finding container 20c205ab7d57b5914d3bf0d34156fcea8eb707633beae8e55de674183ae22acb: Status 404 returned error can't find the container with id 20c205ab7d57b5914d3bf0d34156fcea8eb707633beae8e55de674183ae22acb
Nov 25 15:40:13 crc kubenswrapper[4800]: I1125 15:40:13.056696 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5nblp" event={"ID":"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07","Type":"ContainerStarted","Data":"20c205ab7d57b5914d3bf0d34156fcea8eb707633beae8e55de674183ae22acb"}
Nov 25 15:40:14 crc kubenswrapper[4800]: I1125 15:40:14.069365 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerStarted","Data":"6b04a30b454d4017858979e2381c1708c5c020290d0207e972a4baa9e8699a85"}
Nov 25 15:40:14 crc kubenswrapper[4800]: I1125 15:40:14.070648 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 15:40:14 crc kubenswrapper[4800]: I1125 15:40:14.096997 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.234594616 podStartE2EDuration="6.096976037s" podCreationTimestamp="2025-11-25 15:40:08 +0000 UTC" firstStartedPulling="2025-11-25 15:40:09.079035889 +0000 UTC m=+1370.133444361" lastFinishedPulling="2025-11-25 15:40:12.9414173 +0000 UTC m=+1373.995825782" observedRunningTime="2025-11-25 15:40:14.09675314 +0000 UTC m=+1375.151161622" watchObservedRunningTime="2025-11-25 15:40:14.096976037 +0000 UTC m=+1375.151384519"
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.771239 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7c78ff894b-2g5wf"
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.881423 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw6xd\" (UniqueName: \"kubernetes.io/projected/0e341748-e3fe-4c2d-933e-fdea97ee66b6-kube-api-access-pw6xd\") pod \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") "
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.881485 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-secret-key\") pod \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") "
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.881586 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-config-data\") pod \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") "
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.881708 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-scripts\") pod \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") "
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.881750 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-tls-certs\") pod \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") "
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.881810 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e341748-e3fe-4c2d-933e-fdea97ee66b6-logs\") pod \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") "
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.881995 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-combined-ca-bundle\") pod \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\" (UID: \"0e341748-e3fe-4c2d-933e-fdea97ee66b6\") "
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.884225 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e341748-e3fe-4c2d-933e-fdea97ee66b6-logs" (OuterVolumeSpecName: "logs") pod "0e341748-e3fe-4c2d-933e-fdea97ee66b6" (UID: "0e341748-e3fe-4c2d-933e-fdea97ee66b6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.894068 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "0e341748-e3fe-4c2d-933e-fdea97ee66b6" (UID: "0e341748-e3fe-4c2d-933e-fdea97ee66b6"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.894147 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e341748-e3fe-4c2d-933e-fdea97ee66b6-kube-api-access-pw6xd" (OuterVolumeSpecName: "kube-api-access-pw6xd") pod "0e341748-e3fe-4c2d-933e-fdea97ee66b6" (UID: "0e341748-e3fe-4c2d-933e-fdea97ee66b6"). InnerVolumeSpecName "kube-api-access-pw6xd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.916550 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-scripts" (OuterVolumeSpecName: "scripts") pod "0e341748-e3fe-4c2d-933e-fdea97ee66b6" (UID: "0e341748-e3fe-4c2d-933e-fdea97ee66b6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.925588 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e341748-e3fe-4c2d-933e-fdea97ee66b6" (UID: "0e341748-e3fe-4c2d-933e-fdea97ee66b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.950041 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "0e341748-e3fe-4c2d-933e-fdea97ee66b6" (UID: "0e341748-e3fe-4c2d-933e-fdea97ee66b6"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.962462 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-config-data" (OuterVolumeSpecName: "config-data") pod "0e341748-e3fe-4c2d-933e-fdea97ee66b6" (UID: "0e341748-e3fe-4c2d-933e-fdea97ee66b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.984801 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.984836 4800 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.984861 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e341748-e3fe-4c2d-933e-fdea97ee66b6-logs\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.984870 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.984880 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw6xd\" (UniqueName: \"kubernetes.io/projected/0e341748-e3fe-4c2d-933e-fdea97ee66b6-kube-api-access-pw6xd\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.984889 4800 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0e341748-e3fe-4c2d-933e-fdea97ee66b6-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.984897 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0e341748-e3fe-4c2d-933e-fdea97ee66b6-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:15 crc kubenswrapper[4800]: I1125 15:40:15.986607 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.101055 4800 generic.go:334] "Generic (PLEG): container finished" podID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerID="9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55" exitCode=137
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.101189 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7c78ff894b-2g5wf"
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.101975 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c78ff894b-2g5wf" event={"ID":"0e341748-e3fe-4c2d-933e-fdea97ee66b6","Type":"ContainerDied","Data":"9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55"}
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.102045 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c78ff894b-2g5wf" event={"ID":"0e341748-e3fe-4c2d-933e-fdea97ee66b6","Type":"ContainerDied","Data":"4ab4e3c92895bc425cdde8fde98d58a06eb28289a36c3c85a7424de92591d0b4"}
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.102083 4800 scope.go:117] "RemoveContainer" containerID="d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94"
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.146683 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7c78ff894b-2g5wf"]
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.158081 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7c78ff894b-2g5wf"]
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.310738 4800 scope.go:117] "RemoveContainer" containerID="9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55"
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.333334 4800 scope.go:117] "RemoveContainer" containerID="d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94"
Nov 25 15:40:16 crc kubenswrapper[4800]: E1125 15:40:16.334071 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94\": container with ID starting with d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94 not found: ID does not exist" containerID="d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94"
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.334109 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94"} err="failed to get container status \"d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94\": rpc error: code = NotFound desc = could not find container \"d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94\": container with ID starting with d2eadde3b93cb2278291af9b5041598eba595c77511c597efc726f5894ddff94 not found: ID does not exist"
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.334137 4800 scope.go:117] "RemoveContainer" containerID="9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55"
Nov 25 15:40:16 crc kubenswrapper[4800]: E1125 15:40:16.334559 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55\": container with ID starting with 9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55 not found: ID does not exist" containerID="9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55"
Nov 25 15:40:16 crc kubenswrapper[4800]: I1125 15:40:16.334576 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55"} err="failed to get container status \"9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55\": rpc error: code = NotFound desc = could not find container \"9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55\": container with ID starting with 9db15c7637c90e8927dfddcf6770afab86a5536c60141a3e8722435ca5354c55 not found: ID does not exist"
Nov 25 15:40:17 crc kubenswrapper[4800]: I1125 15:40:17.113395 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-central-agent" containerID="cri-o://bbfd7d89199e3d9946f9c3923f15404aba7242af32a00e74b20d23a527bb95c2" gracePeriod=30
Nov 25 15:40:17 crc kubenswrapper[4800]: I1125 15:40:17.113456 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="sg-core" containerID="cri-o://f2a6338a91f06db47ff6a63f84146f4edd17833d9cd54758ac95664c874c115e" gracePeriod=30
Nov 25 15:40:17 crc kubenswrapper[4800]: I1125 15:40:17.113462 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="proxy-httpd" containerID="cri-o://6b04a30b454d4017858979e2381c1708c5c020290d0207e972a4baa9e8699a85" gracePeriod=30
Nov 25 15:40:17 crc kubenswrapper[4800]: I1125 15:40:17.113495 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-notification-agent" containerID="cri-o://89fc25306fd5245b65f2aa1c7928c143aba87ac9bc49d617c93c504774397742" gracePeriod=30
Nov 25 15:40:17 crc kubenswrapper[4800]: I1125 15:40:17.797934 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" path="/var/lib/kubelet/pods/0e341748-e3fe-4c2d-933e-fdea97ee66b6/volumes"
Nov 25 15:40:18 crc kubenswrapper[4800]: I1125 15:40:18.124459 4800 generic.go:334] "Generic (PLEG): container finished" podID="c693762c-f8a3-478f-94af-a37403c0243c" containerID="6b04a30b454d4017858979e2381c1708c5c020290d0207e972a4baa9e8699a85" exitCode=0
Nov 25 15:40:18 crc kubenswrapper[4800]: I1125 15:40:18.124503 4800 generic.go:334] "Generic (PLEG): container finished" podID="c693762c-f8a3-478f-94af-a37403c0243c" containerID="f2a6338a91f06db47ff6a63f84146f4edd17833d9cd54758ac95664c874c115e" exitCode=2
Nov 25 15:40:18 crc kubenswrapper[4800]: I1125 15:40:18.124516 4800 generic.go:334] "Generic (PLEG): container finished" podID="c693762c-f8a3-478f-94af-a37403c0243c" containerID="89fc25306fd5245b65f2aa1c7928c143aba87ac9bc49d617c93c504774397742" exitCode=0
Nov 25 15:40:18 crc kubenswrapper[4800]: I1125 15:40:18.124525 4800 generic.go:334] "Generic (PLEG): container finished" podID="c693762c-f8a3-478f-94af-a37403c0243c" containerID="bbfd7d89199e3d9946f9c3923f15404aba7242af32a00e74b20d23a527bb95c2" exitCode=0
Nov 25 15:40:18 crc kubenswrapper[4800]: I1125 15:40:18.124545 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerDied","Data":"6b04a30b454d4017858979e2381c1708c5c020290d0207e972a4baa9e8699a85"}
event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerDied","Data":"f2a6338a91f06db47ff6a63f84146f4edd17833d9cd54758ac95664c874c115e"} Nov 25 15:40:18 crc kubenswrapper[4800]: I1125 15:40:18.124619 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerDied","Data":"89fc25306fd5245b65f2aa1c7928c143aba87ac9bc49d617c93c504774397742"} Nov 25 15:40:18 crc kubenswrapper[4800]: I1125 15:40:18.124633 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerDied","Data":"bbfd7d89199e3d9946f9c3923f15404aba7242af32a00e74b20d23a527bb95c2"} Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.691025 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.824891 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-config-data\") pod \"c693762c-f8a3-478f-94af-a37403c0243c\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.825312 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-sg-core-conf-yaml\") pod \"c693762c-f8a3-478f-94af-a37403c0243c\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.825516 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-run-httpd\") pod \"c693762c-f8a3-478f-94af-a37403c0243c\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.825575 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlhh9\" (UniqueName: \"kubernetes.io/projected/c693762c-f8a3-478f-94af-a37403c0243c-kube-api-access-rlhh9\") pod \"c693762c-f8a3-478f-94af-a37403c0243c\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.825672 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-combined-ca-bundle\") pod \"c693762c-f8a3-478f-94af-a37403c0243c\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.825724 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-scripts\") pod \"c693762c-f8a3-478f-94af-a37403c0243c\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.825775 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-log-httpd\") pod \"c693762c-f8a3-478f-94af-a37403c0243c\" (UID: \"c693762c-f8a3-478f-94af-a37403c0243c\") " Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.826257 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c693762c-f8a3-478f-94af-a37403c0243c" (UID: "c693762c-f8a3-478f-94af-a37403c0243c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.826594 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.826758 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c693762c-f8a3-478f-94af-a37403c0243c" (UID: "c693762c-f8a3-478f-94af-a37403c0243c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.830520 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c693762c-f8a3-478f-94af-a37403c0243c-kube-api-access-rlhh9" (OuterVolumeSpecName: "kube-api-access-rlhh9") pod "c693762c-f8a3-478f-94af-a37403c0243c" (UID: "c693762c-f8a3-478f-94af-a37403c0243c"). InnerVolumeSpecName "kube-api-access-rlhh9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.832016 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-scripts" (OuterVolumeSpecName: "scripts") pod "c693762c-f8a3-478f-94af-a37403c0243c" (UID: "c693762c-f8a3-478f-94af-a37403c0243c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.851969 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c693762c-f8a3-478f-94af-a37403c0243c" (UID: "c693762c-f8a3-478f-94af-a37403c0243c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.906702 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c693762c-f8a3-478f-94af-a37403c0243c" (UID: "c693762c-f8a3-478f-94af-a37403c0243c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.921983 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-config-data" (OuterVolumeSpecName: "config-data") pod "c693762c-f8a3-478f-94af-a37403c0243c" (UID: "c693762c-f8a3-478f-94af-a37403c0243c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.928417 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlhh9\" (UniqueName: \"kubernetes.io/projected/c693762c-f8a3-478f-94af-a37403c0243c-kube-api-access-rlhh9\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.928452 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.928463 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.928475 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c693762c-f8a3-478f-94af-a37403c0243c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.928485 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:21 crc kubenswrapper[4800]: I1125 15:40:21.928498 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c693762c-f8a3-478f-94af-a37403c0243c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.175450 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5nblp" event={"ID":"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07","Type":"ContainerStarted","Data":"8cc7a09fd59fda2d84fff87b627a178d74a7daa78e88cec08de27cf6c01eaeb6"} Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.182800 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c693762c-f8a3-478f-94af-a37403c0243c","Type":"ContainerDied","Data":"78f07d3c0f82e84654add5a91b1f05625ffdfcd43ce4a517906ed5baf9cdd8b5"} Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.182869 4800 scope.go:117] "RemoveContainer" containerID="6b04a30b454d4017858979e2381c1708c5c020290d0207e972a4baa9e8699a85" Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.182993 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.182993 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.207512 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-5nblp" podStartSLOduration=1.459084539 podStartE2EDuration="10.207490164s" podCreationTimestamp="2025-11-25 15:40:12 +0000 UTC" firstStartedPulling="2025-11-25 15:40:12.882058406 +0000 UTC m=+1373.936466888" lastFinishedPulling="2025-11-25 15:40:21.630464031 +0000 UTC m=+1382.684872513" observedRunningTime="2025-11-25 15:40:22.204957793 +0000 UTC m=+1383.259366275" watchObservedRunningTime="2025-11-25 15:40:22.207490164 +0000 UTC m=+1383.261898646"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.293403 4800 scope.go:117] "RemoveContainer" containerID="f2a6338a91f06db47ff6a63f84146f4edd17833d9cd54758ac95664c874c115e"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.294545 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.306176 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.332658 4800 scope.go:117] "RemoveContainer" containerID="89fc25306fd5245b65f2aa1c7928c143aba87ac9bc49d617c93c504774397742"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.341423 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:22 crc kubenswrapper[4800]: E1125 15:40:22.342054 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="proxy-httpd"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342082 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="proxy-httpd"
Nov 25 15:40:22 crc kubenswrapper[4800]: E1125 15:40:22.342126 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="sg-core"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342136 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="sg-core"
Nov 25 15:40:22 crc kubenswrapper[4800]: E1125 15:40:22.342148 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-notification-agent"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342160 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-notification-agent"
Nov 25 15:40:22 crc kubenswrapper[4800]: E1125 15:40:22.342179 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342186 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon"
Nov 25 15:40:22 crc kubenswrapper[4800]: E1125 15:40:22.342208 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-central-agent"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342217 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-central-agent"
Nov 25 15:40:22 crc kubenswrapper[4800]: E1125 15:40:22.342231 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon-log"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342239 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon-log"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342479 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon-log"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342513 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-central-agent"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342532 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e341748-e3fe-4c2d-933e-fdea97ee66b6" containerName="horizon"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342554 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="sg-core"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342565 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="proxy-httpd"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.342575 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c693762c-f8a3-478f-94af-a37403c0243c" containerName="ceilometer-notification-agent"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.344437 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.350779 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.359145 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.359564 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.371133 4800 scope.go:117] "RemoveContainer" containerID="bbfd7d89199e3d9946f9c3923f15404aba7242af32a00e74b20d23a527bb95c2"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.438555 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.438600 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-config-data\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.438807 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-scripts\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.438958 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbxg6\" (UniqueName: \"kubernetes.io/projected/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-kube-api-access-fbxg6\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.439249 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-run-httpd\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.439293 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.439355 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-log-httpd\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.541268 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-run-httpd\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.541936 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-run-httpd\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.542058 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.542177 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-log-httpd\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.542352 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.542482 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-config-data\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.542662 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-scripts\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.542787 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbxg6\" (UniqueName: \"kubernetes.io/projected/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-kube-api-access-fbxg6\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.542785 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-log-httpd\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.548149 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.548359 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.548508 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-config-data\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.549061 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-scripts\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Nov 25 15:40:22 crc kubenswrapper[4800]: I1125 15:40:22.570774 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbxg6\" (UniqueName: \"kubernetes.io/projected/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-kube-api-access-fbxg6\") pod \"ceilometer-0\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:40:23 crc kubenswrapper[4800]: I1125 15:40:23.156415 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:40:23 crc kubenswrapper[4800]: W1125 15:40:23.157980 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae049c18_b4e2_4b19_9ef6_851ae0c931b6.slice/crio-474ca1d488c8bef149a98f3b1910a2599ad3639745f19a3d76dd46039ac20e3c WatchSource:0}: Error finding container 474ca1d488c8bef149a98f3b1910a2599ad3639745f19a3d76dd46039ac20e3c: Status 404 returned error can't find the container with id 474ca1d488c8bef149a98f3b1910a2599ad3639745f19a3d76dd46039ac20e3c Nov 25 15:40:23 crc kubenswrapper[4800]: I1125 15:40:23.200327 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerStarted","Data":"474ca1d488c8bef149a98f3b1910a2599ad3639745f19a3d76dd46039ac20e3c"} Nov 25 15:40:23 crc kubenswrapper[4800]: I1125 15:40:23.796539 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c693762c-f8a3-478f-94af-a37403c0243c" path="/var/lib/kubelet/pods/c693762c-f8a3-478f-94af-a37403c0243c/volumes" Nov 25 15:40:24 crc kubenswrapper[4800]: I1125 15:40:24.214139 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerStarted","Data":"b89e7a9dba4bb1c66386fad0dd9c9c286885618dd912973f3e8d5f7a0d398db7"} Nov 25 15:40:25 crc kubenswrapper[4800]: I1125 15:40:25.228045 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerStarted","Data":"26ed7f2c89ef65a1719b1cbd6288bf46cd2486472de115d933fcbd86a95c6183"} Nov 25 15:40:25 crc kubenswrapper[4800]: I1125 15:40:25.228459 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerStarted","Data":"2918738060c71248d9dcb811f284ecc6eb3d96cba1c6b0953489f3d8a08366be"} Nov 25 15:40:27 crc kubenswrapper[4800]: I1125 15:40:27.253379 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerStarted","Data":"58e01951376424ee83a7b5c8f9ab2b9d3a61aec8a2f4ba953a10fed6a46bf3d0"} Nov 25 15:40:27 crc kubenswrapper[4800]: I1125 15:40:27.254063 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 15:40:27 crc kubenswrapper[4800]: I1125 15:40:27.276032 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.7250856030000001 podStartE2EDuration="5.27600956s" podCreationTimestamp="2025-11-25 15:40:22 +0000 UTC" firstStartedPulling="2025-11-25 15:40:23.162785292 +0000 UTC m=+1384.217193804" lastFinishedPulling="2025-11-25 15:40:26.713709289 +0000 UTC m=+1387.768117761" observedRunningTime="2025-11-25 15:40:27.274652103 +0000 UTC m=+1388.329060675" watchObservedRunningTime="2025-11-25 15:40:27.27600956 +0000 UTC m=+1388.330418042" Nov 25 15:40:30 crc kubenswrapper[4800]: I1125 15:40:30.730459 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:40:30 crc kubenswrapper[4800]: I1125 15:40:30.731303 4800 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-central-agent" containerID="cri-o://b89e7a9dba4bb1c66386fad0dd9c9c286885618dd912973f3e8d5f7a0d398db7" gracePeriod=30 Nov 25 15:40:30 crc kubenswrapper[4800]: I1125 15:40:30.731405 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="sg-core" containerID="cri-o://26ed7f2c89ef65a1719b1cbd6288bf46cd2486472de115d933fcbd86a95c6183" gracePeriod=30 Nov 25 15:40:30 crc kubenswrapper[4800]: I1125 15:40:30.731405 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-notification-agent" containerID="cri-o://2918738060c71248d9dcb811f284ecc6eb3d96cba1c6b0953489f3d8a08366be" gracePeriod=30 Nov 25 15:40:30 crc kubenswrapper[4800]: I1125 15:40:30.731414 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="proxy-httpd" containerID="cri-o://58e01951376424ee83a7b5c8f9ab2b9d3a61aec8a2f4ba953a10fed6a46bf3d0" gracePeriod=30 Nov 25 15:40:31 crc kubenswrapper[4800]: I1125 15:40:31.303022 4800 generic.go:334] "Generic (PLEG): container finished" podID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerID="58e01951376424ee83a7b5c8f9ab2b9d3a61aec8a2f4ba953a10fed6a46bf3d0" exitCode=0 Nov 25 15:40:31 crc kubenswrapper[4800]: I1125 15:40:31.303430 4800 generic.go:334] "Generic (PLEG): container finished" podID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerID="26ed7f2c89ef65a1719b1cbd6288bf46cd2486472de115d933fcbd86a95c6183" exitCode=2 Nov 25 15:40:31 crc kubenswrapper[4800]: I1125 15:40:31.303137 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerDied","Data":"58e01951376424ee83a7b5c8f9ab2b9d3a61aec8a2f4ba953a10fed6a46bf3d0"} Nov 25 15:40:31 crc kubenswrapper[4800]: I1125 15:40:31.303519 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerDied","Data":"26ed7f2c89ef65a1719b1cbd6288bf46cd2486472de115d933fcbd86a95c6183"} Nov 25 15:40:32 crc kubenswrapper[4800]: I1125 15:40:32.322998 4800 generic.go:334] "Generic (PLEG): container finished" podID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerID="2918738060c71248d9dcb811f284ecc6eb3d96cba1c6b0953489f3d8a08366be" exitCode=0 Nov 25 15:40:32 crc kubenswrapper[4800]: I1125 15:40:32.323084 4800 generic.go:334] "Generic (PLEG): container finished" podID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerID="b89e7a9dba4bb1c66386fad0dd9c9c286885618dd912973f3e8d5f7a0d398db7" exitCode=0 Nov 25 15:40:32 crc kubenswrapper[4800]: I1125 15:40:32.323087 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerDied","Data":"2918738060c71248d9dcb811f284ecc6eb3d96cba1c6b0953489f3d8a08366be"} Nov 25 15:40:32 crc kubenswrapper[4800]: I1125 15:40:32.323172 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerDied","Data":"b89e7a9dba4bb1c66386fad0dd9c9c286885618dd912973f3e8d5f7a0d398db7"} Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.687898 4800 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.811686 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-config-data\") pod \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.811742 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-sg-core-conf-yaml\") pod \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.811976 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-run-httpd\") pod \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.812048 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-combined-ca-bundle\") pod \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.812245 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-scripts\") pod \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.812289 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbxg6\" (UniqueName: \"kubernetes.io/projected/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-kube-api-access-fbxg6\") pod \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.812424 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-log-httpd\") pod \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\" (UID: \"ae049c18-b4e2-4b19-9ef6-851ae0c931b6\") " Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.813530 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ae049c18-b4e2-4b19-9ef6-851ae0c931b6" (UID: "ae049c18-b4e2-4b19-9ef6-851ae0c931b6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.813713 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ae049c18-b4e2-4b19-9ef6-851ae0c931b6" (UID: "ae049c18-b4e2-4b19-9ef6-851ae0c931b6"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.819390 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-scripts" (OuterVolumeSpecName: "scripts") pod "ae049c18-b4e2-4b19-9ef6-851ae0c931b6" (UID: "ae049c18-b4e2-4b19-9ef6-851ae0c931b6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.826099 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-kube-api-access-fbxg6" (OuterVolumeSpecName: "kube-api-access-fbxg6") pod "ae049c18-b4e2-4b19-9ef6-851ae0c931b6" (UID: "ae049c18-b4e2-4b19-9ef6-851ae0c931b6"). InnerVolumeSpecName "kube-api-access-fbxg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.845641 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ae049c18-b4e2-4b19-9ef6-851ae0c931b6" (UID: "ae049c18-b4e2-4b19-9ef6-851ae0c931b6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.898571 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae049c18-b4e2-4b19-9ef6-851ae0c931b6" (UID: "ae049c18-b4e2-4b19-9ef6-851ae0c931b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.917048 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.917099 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.917120 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.917139 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.917157 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.917175 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbxg6\" (UniqueName: \"kubernetes.io/projected/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-kube-api-access-fbxg6\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:33 crc kubenswrapper[4800]: I1125 15:40:33.945569 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-config-data" (OuterVolumeSpecName: "config-data") pod "ae049c18-b4e2-4b19-9ef6-851ae0c931b6" (UID: "ae049c18-b4e2-4b19-9ef6-851ae0c931b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.020221 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae049c18-b4e2-4b19-9ef6-851ae0c931b6-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.351573 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ae049c18-b4e2-4b19-9ef6-851ae0c931b6","Type":"ContainerDied","Data":"474ca1d488c8bef149a98f3b1910a2599ad3639745f19a3d76dd46039ac20e3c"} Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.351664 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.351684 4800 scope.go:117] "RemoveContainer" containerID="58e01951376424ee83a7b5c8f9ab2b9d3a61aec8a2f4ba953a10fed6a46bf3d0" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.396966 4800 scope.go:117] "RemoveContainer" containerID="26ed7f2c89ef65a1719b1cbd6288bf46cd2486472de115d933fcbd86a95c6183" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.400223 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.427630 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.438416 4800 scope.go:117] "RemoveContainer" containerID="2918738060c71248d9dcb811f284ecc6eb3d96cba1c6b0953489f3d8a08366be" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.438672 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:40:34 crc kubenswrapper[4800]: E1125 15:40:34.439243 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="proxy-httpd" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439271 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="proxy-httpd" Nov 25 15:40:34 crc kubenswrapper[4800]: E1125 15:40:34.439315 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-notification-agent" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439331 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-notification-agent" Nov 25 15:40:34 crc kubenswrapper[4800]: E1125 15:40:34.439345 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-central-agent" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439356 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-central-agent" Nov 25 15:40:34 crc kubenswrapper[4800]: E1125 15:40:34.439406 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="sg-core" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439418 4800 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="sg-core" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439730 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-notification-agent" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439773 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="ceilometer-central-agent" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439799 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="proxy-httpd" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.439816 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" containerName="sg-core" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.442425 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.446133 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.446655 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.451787 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.482802 4800 scope.go:117] "RemoveContainer" containerID="b89e7a9dba4bb1c66386fad0dd9c9c286885618dd912973f3e8d5f7a0d398db7" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.531680 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-config-data\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.531740 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-scripts\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.531764 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-run-httpd\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.531926 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-log-httpd\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0" Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.532001 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0" 
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.532042 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.532070 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdmb6\" (UniqueName: \"kubernetes.io/projected/27fd4305-1e43-4444-9fa5-7ac870390999-kube-api-access-fdmb6\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.635443 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-log-httpd\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.635556 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.635603 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.635650 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdmb6\" (UniqueName: \"kubernetes.io/projected/27fd4305-1e43-4444-9fa5-7ac870390999-kube-api-access-fdmb6\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.635907 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-config-data\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.635989 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-scripts\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.636087 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-run-httpd\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.636674 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-log-httpd\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.636684 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-run-httpd\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.640062 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-scripts\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.640526 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.642584 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-config-data\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.646050 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.660633 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdmb6\" (UniqueName: \"kubernetes.io/projected/27fd4305-1e43-4444-9fa5-7ac870390999-kube-api-access-fdmb6\") pod \"ceilometer-0\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " pod="openstack/ceilometer-0"
Nov 25 15:40:34 crc kubenswrapper[4800]: I1125 15:40:34.769225 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 15:40:35 crc kubenswrapper[4800]: I1125 15:40:35.091383 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 15:40:35 crc kubenswrapper[4800]: W1125 15:40:35.096264 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27fd4305_1e43_4444_9fa5_7ac870390999.slice/crio-12258d9bb9db40ab17165c45271a8e96d7d7a4f3fe6480dad2e5a1bcc5621bb3 WatchSource:0}: Error finding container 12258d9bb9db40ab17165c45271a8e96d7d7a4f3fe6480dad2e5a1bcc5621bb3: Status 404 returned error can't find the container with id 12258d9bb9db40ab17165c45271a8e96d7d7a4f3fe6480dad2e5a1bcc5621bb3
Nov 25 15:40:35 crc kubenswrapper[4800]: I1125 15:40:35.099965 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 15:40:35 crc kubenswrapper[4800]: I1125 15:40:35.368498 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerStarted","Data":"12258d9bb9db40ab17165c45271a8e96d7d7a4f3fe6480dad2e5a1bcc5621bb3"}
Nov 25 15:40:35 crc kubenswrapper[4800]: I1125 15:40:35.803203 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae049c18-b4e2-4b19-9ef6-851ae0c931b6" path="/var/lib/kubelet/pods/ae049c18-b4e2-4b19-9ef6-851ae0c931b6/volumes"
Nov 25 15:40:36 crc kubenswrapper[4800]: I1125 15:40:36.380739 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerStarted","Data":"9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9"}
Nov 25 15:40:37 crc kubenswrapper[4800]: I1125 15:40:37.406881 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerStarted","Data":"f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd"}
Nov 25 15:40:37 crc kubenswrapper[4800]: I1125 15:40:37.407396 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerStarted","Data":"1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25"}
Nov 25 15:40:38 crc kubenswrapper[4800]: I1125 15:40:38.429924 4800 generic.go:334] "Generic (PLEG): container finished" podID="8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" containerID="8cc7a09fd59fda2d84fff87b627a178d74a7daa78e88cec08de27cf6c01eaeb6" exitCode=0
Nov 25 15:40:38 crc kubenswrapper[4800]: I1125 15:40:38.430035 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5nblp" event={"ID":"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07","Type":"ContainerDied","Data":"8cc7a09fd59fda2d84fff87b627a178d74a7daa78e88cec08de27cf6c01eaeb6"}
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.444491 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerStarted","Data":"8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183"}
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.477705 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.201818855 podStartE2EDuration="5.477681821s" podCreationTimestamp="2025-11-25 15:40:34 +0000 UTC" firstStartedPulling="2025-11-25 15:40:35.099569614 +0000 UTC m=+1396.153978096" lastFinishedPulling="2025-11-25 15:40:38.37543258 +0000 UTC m=+1399.429841062" observedRunningTime="2025-11-25 15:40:39.472183028 +0000 UTC m=+1400.526591500" watchObservedRunningTime="2025-11-25 15:40:39.477681821 +0000 UTC m=+1400.532090303"
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.805477 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.951303 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9xr9\" (UniqueName: \"kubernetes.io/projected/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-kube-api-access-k9xr9\") pod \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") "
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.951482 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-config-data\") pod \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") "
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.951590 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-scripts\") pod \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") "
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.951643 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-combined-ca-bundle\") pod \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\" (UID: \"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07\") "
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.971008 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-scripts" (OuterVolumeSpecName: "scripts") pod "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" (UID: "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.971263 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-kube-api-access-k9xr9" (OuterVolumeSpecName: "kube-api-access-k9xr9") pod "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" (UID: "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07"). InnerVolumeSpecName "kube-api-access-k9xr9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.978516 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" (UID: "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:39 crc kubenswrapper[4800]: I1125 15:40:39.992292 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-config-data" (OuterVolumeSpecName: "config-data") pod "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" (UID: "8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.054426 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.054475 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.054490 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.054509 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9xr9\" (UniqueName: \"kubernetes.io/projected/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07-kube-api-access-k9xr9\") on node \"crc\" DevicePath \"\""
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.456383 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5nblp"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.465644 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5nblp" event={"ID":"8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07","Type":"ContainerDied","Data":"20c205ab7d57b5914d3bf0d34156fcea8eb707633beae8e55de674183ae22acb"}
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.465720 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20c205ab7d57b5914d3bf0d34156fcea8eb707633beae8e55de674183ae22acb"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.465768 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.586871 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 15:40:40 crc kubenswrapper[4800]: E1125 15:40:40.587328 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" containerName="nova-cell0-conductor-db-sync"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.587343 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" containerName="nova-cell0-conductor-db-sync"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.587539 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" containerName="nova-cell0-conductor-db-sync"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.588405 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.590965 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-w9n88"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.595204 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.609768 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.672062 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b71b2770-3cf3-4621-880f-e8e39e94771d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.672232 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggqbz\" (UniqueName: \"kubernetes.io/projected/b71b2770-3cf3-4621-880f-e8e39e94771d-kube-api-access-ggqbz\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.672510 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b71b2770-3cf3-4621-880f-e8e39e94771d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.775057 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b71b2770-3cf3-4621-880f-e8e39e94771d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.776123 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b71b2770-3cf3-4621-880f-e8e39e94771d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.776235 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggqbz\" (UniqueName: \"kubernetes.io/projected/b71b2770-3cf3-4621-880f-e8e39e94771d-kube-api-access-ggqbz\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.781187 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b71b2770-3cf3-4621-880f-e8e39e94771d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.797209 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggqbz\" (UniqueName: \"kubernetes.io/projected/b71b2770-3cf3-4621-880f-e8e39e94771d-kube-api-access-ggqbz\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.800409 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b71b2770-3cf3-4621-880f-e8e39e94771d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b71b2770-3cf3-4621-880f-e8e39e94771d\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:40 crc kubenswrapper[4800]: I1125 15:40:40.913712 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:41 crc kubenswrapper[4800]: I1125 15:40:41.370400 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 15:40:41 crc kubenswrapper[4800]: I1125 15:40:41.469515 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b71b2770-3cf3-4621-880f-e8e39e94771d","Type":"ContainerStarted","Data":"fc3194aa297b6535050d77ea52c2bed79c8b8381c2265e58928eb6d5e0e897b4"}
Nov 25 15:40:42 crc kubenswrapper[4800]: I1125 15:40:42.484234 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b71b2770-3cf3-4621-880f-e8e39e94771d","Type":"ContainerStarted","Data":"11107a9e48e0c49603185e3758b4db19d43e3c0801b9edb498ee2141b6b710ae"}
Nov 25 15:40:42 crc kubenswrapper[4800]: I1125 15:40:42.484668 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:42 crc kubenswrapper[4800]: I1125 15:40:42.519334 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.519305691 podStartE2EDuration="2.519305691s" podCreationTimestamp="2025-11-25 15:40:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:40:42.503168701 +0000 UTC m=+1403.557577223" watchObservedRunningTime="2025-11-25 15:40:42.519305691 +0000 UTC m=+1403.573714193"
Nov 25 15:40:42 crc kubenswrapper[4800]: I1125 15:40:42.639977 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 15:40:42 crc kubenswrapper[4800]: I1125 15:40:42.640090 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 15:40:50 crc kubenswrapper[4800]: I1125 15:40:50.948645 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.533179 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-84lxn"]
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.536425 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.540253 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.540642 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.568489 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-84lxn"]
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.606826 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-config-data\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.606953 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.607026 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw59z\" (UniqueName: \"kubernetes.io/projected/a1268195-54b6-4a47-bcb7-eb573bb91209-kube-api-access-gw59z\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.607061 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-scripts\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.709268 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-config-data\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.709970 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.710146 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw59z\" (UniqueName: \"kubernetes.io/projected/a1268195-54b6-4a47-bcb7-eb573bb91209-kube-api-access-gw59z\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.710260 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-scripts\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.726322 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-scripts\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.728533 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-config-data\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.734725 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.751125 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw59z\" (UniqueName: \"kubernetes.io/projected/a1268195-54b6-4a47-bcb7-eb573bb91209-kube-api-access-gw59z\") pod \"nova-cell0-cell-mapping-84lxn\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.825210 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.826726 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.826827 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.832388 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.835556 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.837145 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.842396 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.871238 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.892106 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-84lxn"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.923660 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.923778 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvmc6\" (UniqueName: \"kubernetes.io/projected/a720b7c6-a205-4da7-bdb2-98e8ac932d17-kube-api-access-bvmc6\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.923860 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-config-data\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.923882 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.923975 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fcjt\" (UniqueName: \"kubernetes.io/projected/c9b37821-9578-4c63-b27a-194684167a87-kube-api-access-4fcjt\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.924028 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.966299 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.968168 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 15:40:51 crc kubenswrapper[4800]: I1125 15:40:51.972089 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.003042 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.027581 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.027686 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.027719 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/033cc45c-99b1-4199-aee4-13218fbd5f32-logs\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.027789 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ghrd\" (UniqueName: \"kubernetes.io/projected/033cc45c-99b1-4199-aee4-13218fbd5f32-kube-api-access-5ghrd\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.027831 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvmc6\" (UniqueName: \"kubernetes.io/projected/a720b7c6-a205-4da7-bdb2-98e8ac932d17-kube-api-access-bvmc6\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.027903 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-config-data\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.027926 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.028011 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-config-data\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.028038 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fcjt\" (UniqueName: \"kubernetes.io/projected/c9b37821-9578-4c63-b27a-194684167a87-kube-api-access-4fcjt\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.028070 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.043365 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-config-data\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.043971 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.051223 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.078753 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvmc6\" (UniqueName: \"kubernetes.io/projected/a720b7c6-a205-4da7-bdb2-98e8ac932d17-kube-api-access-bvmc6\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.085893 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fcjt\" (UniqueName: \"kubernetes.io/projected/c9b37821-9578-4c63-b27a-194684167a87-kube-api-access-4fcjt\") pod \"nova-cell1-novncproxy-0\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.090712 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " pod="openstack/nova-scheduler-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.090815 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.093019 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.097112 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130020 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ghrd\" (UniqueName: \"kubernetes.io/projected/033cc45c-99b1-4199-aee4-13218fbd5f32-kube-api-access-5ghrd\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130105 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1944c12b-e7c0-4c00-ab74-1c735daf2eef-logs\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130149 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130195 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-config-data\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130226 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-config-data\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130270 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2z8w\" (UniqueName: \"kubernetes.io/projected/1944c12b-e7c0-4c00-ab74-1c735daf2eef-kube-api-access-d2z8w\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130293 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130339 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/033cc45c-99b1-4199-aee4-13218fbd5f32-logs\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.130755 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/033cc45c-99b1-4199-aee4-13218fbd5f32-logs\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0"
Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.136451 4800
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.141507 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.141698 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-config-data\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.151085 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f7bbc55bc-hdtvr"] Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.152762 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.159998 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ghrd\" (UniqueName: \"kubernetes.io/projected/033cc45c-99b1-4199-aee4-13218fbd5f32-kube-api-access-5ghrd\") pod \"nova-api-0\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " pod="openstack/nova-api-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.176462 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.191173 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f7bbc55bc-hdtvr"] Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.205454 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.304700 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-dns-svc\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.304870 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-nb\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.305008 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-sb\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.305110 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1944c12b-e7c0-4c00-ab74-1c735daf2eef-logs\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.305185 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-config\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.305240 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.305317 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s42hn\" (UniqueName: \"kubernetes.io/projected/45a8dfaa-a31d-4d43-b445-cc559b7420f3-kube-api-access-s42hn\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.307637 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1944c12b-e7c0-4c00-ab74-1c735daf2eef-logs\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.307702 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-config-data\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.308024 4800 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2z8w\" (UniqueName: \"kubernetes.io/projected/1944c12b-e7c0-4c00-ab74-1c735daf2eef-kube-api-access-d2z8w\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.344613 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.345469 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-config-data\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.345658 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2z8w\" (UniqueName: \"kubernetes.io/projected/1944c12b-e7c0-4c00-ab74-1c735daf2eef-kube-api-access-d2z8w\") pod \"nova-metadata-0\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.411381 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-dns-svc\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.411451 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-nb\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.411493 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-sb\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.411536 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-config\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.411578 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s42hn\" (UniqueName: \"kubernetes.io/projected/45a8dfaa-a31d-4d43-b445-cc559b7420f3-kube-api-access-s42hn\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.418423 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-dns-svc\") pod 
\"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.419014 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-nb\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.419524 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-sb\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.420092 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-config\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.421428 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.452394 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.455546 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s42hn\" (UniqueName: \"kubernetes.io/projected/45a8dfaa-a31d-4d43-b445-cc559b7420f3-kube-api-access-s42hn\") pod \"dnsmasq-dns-f7bbc55bc-hdtvr\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.494940 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:52 crc kubenswrapper[4800]: I1125 15:40:52.886202 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-84lxn"] Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.077274 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 15:40:53 crc kubenswrapper[4800]: W1125 15:40:53.117093 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9b37821_9578_4c63_b27a_194684167a87.slice/crio-b107c621a2a9ba1aab6700b28a7c82577d35cb68114c6977ee4ea846ff71f864 WatchSource:0}: Error finding container b107c621a2a9ba1aab6700b28a7c82577d35cb68114c6977ee4ea846ff71f864: Status 404 returned error can't find the container with id b107c621a2a9ba1aab6700b28a7c82577d35cb68114c6977ee4ea846ff71f864 Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.186699 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2d7j"] Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.188694 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: W1125 15:40:53.192302 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda720b7c6_a205_4da7_bdb2_98e8ac932d17.slice/crio-9f00be0beb00672856bb03b27af497cfdc0be056a956a9fce329196a7befee7a WatchSource:0}: Error finding container 9f00be0beb00672856bb03b27af497cfdc0be056a956a9fce329196a7befee7a: Status 404 returned error can't find the container with id 9f00be0beb00672856bb03b27af497cfdc0be056a956a9fce329196a7befee7a Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.193476 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.193906 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.214720 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.223572 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2d7j"] Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.277574 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.287430 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f7bbc55bc-hdtvr"] Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.337818 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-config-data\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.337915 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-scripts\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.337985 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrg54\" (UniqueName: \"kubernetes.io/projected/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-kube-api-access-mrg54\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.338016 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.357972 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:40:53 crc kubenswrapper[4800]: W1125 15:40:53.370161 4800 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod033cc45c_99b1_4199_aee4_13218fbd5f32.slice/crio-ce0fcfaa23bcdcad6aeec98e4ee14cce9c9eb0d25321a698632363f99883ed66 WatchSource:0}: Error finding container ce0fcfaa23bcdcad6aeec98e4ee14cce9c9eb0d25321a698632363f99883ed66: Status 404 returned error can't find the container with id ce0fcfaa23bcdcad6aeec98e4ee14cce9c9eb0d25321a698632363f99883ed66 Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.439589 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-config-data\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.439637 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-scripts\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.439691 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrg54\" (UniqueName: \"kubernetes.io/projected/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-kube-api-access-mrg54\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.439711 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.445982 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-config-data\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.447522 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-scripts\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.448082 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.460724 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrg54\" (UniqueName: \"kubernetes.io/projected/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-kube-api-access-mrg54\") pod \"nova-cell1-conductor-db-sync-v2d7j\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " 
pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.509935 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.662718 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a720b7c6-a205-4da7-bdb2-98e8ac932d17","Type":"ContainerStarted","Data":"9f00be0beb00672856bb03b27af497cfdc0be056a956a9fce329196a7befee7a"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.668214 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c9b37821-9578-4c63-b27a-194684167a87","Type":"ContainerStarted","Data":"b107c621a2a9ba1aab6700b28a7c82577d35cb68114c6977ee4ea846ff71f864"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.685618 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-84lxn" event={"ID":"a1268195-54b6-4a47-bcb7-eb573bb91209","Type":"ContainerStarted","Data":"5e340500223746d502aa5fe0ab3187adbb7f252d3127a9b39dac6998f5009185"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.686062 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-84lxn" event={"ID":"a1268195-54b6-4a47-bcb7-eb573bb91209","Type":"ContainerStarted","Data":"096f7f5688d8b8a2c0d0fbb71c295e050bb0835cae33cbd8d070162c4e182513"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.691267 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1944c12b-e7c0-4c00-ab74-1c735daf2eef","Type":"ContainerStarted","Data":"06317b4d2f967533bee6e3aebf89e41a6f9361df7b6513a82834ea4f47e813eb"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.693703 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"033cc45c-99b1-4199-aee4-13218fbd5f32","Type":"ContainerStarted","Data":"ce0fcfaa23bcdcad6aeec98e4ee14cce9c9eb0d25321a698632363f99883ed66"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.710854 4800 generic.go:334] "Generic (PLEG): container finished" podID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerID="975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd" exitCode=0 Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.710925 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" event={"ID":"45a8dfaa-a31d-4d43-b445-cc559b7420f3","Type":"ContainerDied","Data":"975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.710969 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" event={"ID":"45a8dfaa-a31d-4d43-b445-cc559b7420f3","Type":"ContainerStarted","Data":"80157c60fae850124d05d2526950e91d7152849efded36a186375e2e23fa94a0"} Nov 25 15:40:53 crc kubenswrapper[4800]: I1125 15:40:53.726912 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-84lxn" podStartSLOduration=2.726890052 podStartE2EDuration="2.726890052s" podCreationTimestamp="2025-11-25 15:40:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:40:53.704669654 +0000 UTC m=+1414.759078136" watchObservedRunningTime="2025-11-25 15:40:53.726890052 +0000 UTC m=+1414.781298534" Nov 25 15:40:54 
crc kubenswrapper[4800]: I1125 15:40:54.047972 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2d7j"] Nov 25 15:40:54 crc kubenswrapper[4800]: W1125 15:40:54.058716 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f050231_ed56_42ad_aeb8_4a79ed9ed00f.slice/crio-c47716fa899bb0507ea92aeef16f1f87622faceb9382bd90b904ec9fd5059c7d WatchSource:0}: Error finding container c47716fa899bb0507ea92aeef16f1f87622faceb9382bd90b904ec9fd5059c7d: Status 404 returned error can't find the container with id c47716fa899bb0507ea92aeef16f1f87622faceb9382bd90b904ec9fd5059c7d Nov 25 15:40:54 crc kubenswrapper[4800]: I1125 15:40:54.778174 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" event={"ID":"45a8dfaa-a31d-4d43-b445-cc559b7420f3","Type":"ContainerStarted","Data":"993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df"} Nov 25 15:40:54 crc kubenswrapper[4800]: I1125 15:40:54.784051 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:40:54 crc kubenswrapper[4800]: I1125 15:40:54.791456 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" event={"ID":"3f050231-ed56-42ad-aeb8-4a79ed9ed00f","Type":"ContainerStarted","Data":"65fdf81506c1f3b0f9184d604c33756baf4d866f3d20d76d05c3e1ea701c8c41"} Nov 25 15:40:54 crc kubenswrapper[4800]: I1125 15:40:54.791513 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" event={"ID":"3f050231-ed56-42ad-aeb8-4a79ed9ed00f","Type":"ContainerStarted","Data":"c47716fa899bb0507ea92aeef16f1f87622faceb9382bd90b904ec9fd5059c7d"} Nov 25 15:40:54 crc kubenswrapper[4800]: I1125 15:40:54.848144 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" podStartSLOduration=2.848087952 podStartE2EDuration="2.848087952s" podCreationTimestamp="2025-11-25 15:40:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:40:54.83007446 +0000 UTC m=+1415.884482932" watchObservedRunningTime="2025-11-25 15:40:54.848087952 +0000 UTC m=+1415.902496434" Nov 25 15:40:54 crc kubenswrapper[4800]: I1125 15:40:54.851678 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" podStartSLOduration=1.8516499610000001 podStartE2EDuration="1.851649961s" podCreationTimestamp="2025-11-25 15:40:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:40:54.846151138 +0000 UTC m=+1415.900559620" watchObservedRunningTime="2025-11-25 15:40:54.851649961 +0000 UTC m=+1415.906058443" Nov 25 15:40:56 crc kubenswrapper[4800]: I1125 15:40:56.050596 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:40:56 crc kubenswrapper[4800]: I1125 15:40:56.060600 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.831704 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"033cc45c-99b1-4199-aee4-13218fbd5f32","Type":"ContainerStarted","Data":"985dfce5809b4c785edba2049bebecb72e0789c1d7cb14bce8ffddccd5aeb0a5"} Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.832654 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"033cc45c-99b1-4199-aee4-13218fbd5f32","Type":"ContainerStarted","Data":"7677d067723edc8673c57f40b29b4a8d1c846b0a612636c75f84a303f7819a61"} Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.834336 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1944c12b-e7c0-4c00-ab74-1c735daf2eef","Type":"ContainerStarted","Data":"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d"} Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.834377 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1944c12b-e7c0-4c00-ab74-1c735daf2eef","Type":"ContainerStarted","Data":"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61"} Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.834404 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-log" containerID="cri-o://24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61" gracePeriod=30 Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.834463 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-metadata" containerID="cri-o://f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d" gracePeriod=30 Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.839114 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a720b7c6-a205-4da7-bdb2-98e8ac932d17","Type":"ContainerStarted","Data":"0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75"} Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.846089 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c9b37821-9578-4c63-b27a-194684167a87","Type":"ContainerStarted","Data":"8b7c3a999bd3d784ab71760c6c3973147ac2b3b18484568022240afe2d978487"} Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.846240 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="c9b37821-9578-4c63-b27a-194684167a87" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://8b7c3a999bd3d784ab71760c6c3973147ac2b3b18484568022240afe2d978487" gracePeriod=30 Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.862902 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.49151633 podStartE2EDuration="6.862873545s" podCreationTimestamp="2025-11-25 15:40:51 +0000 UTC" firstStartedPulling="2025-11-25 15:40:53.372954024 +0000 UTC m=+1414.427362506" lastFinishedPulling="2025-11-25 15:40:56.744311229 +0000 UTC m=+1417.798719721" observedRunningTime="2025-11-25 15:40:57.861788145 +0000 UTC m=+1418.916196627" watchObservedRunningTime="2025-11-25 15:40:57.862873545 +0000 UTC m=+1418.917282027" Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.891204 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" 
podStartSLOduration=2.432868006 podStartE2EDuration="5.891178053s" podCreationTimestamp="2025-11-25 15:40:52 +0000 UTC" firstStartedPulling="2025-11-25 15:40:53.28306386 +0000 UTC m=+1414.337472332" lastFinishedPulling="2025-11-25 15:40:56.741373897 +0000 UTC m=+1417.795782379" observedRunningTime="2025-11-25 15:40:57.884557009 +0000 UTC m=+1418.938965491" watchObservedRunningTime="2025-11-25 15:40:57.891178053 +0000 UTC m=+1418.945586535" Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.911002 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.366335193 podStartE2EDuration="6.910979275s" podCreationTimestamp="2025-11-25 15:40:51 +0000 UTC" firstStartedPulling="2025-11-25 15:40:53.197129496 +0000 UTC m=+1414.251537978" lastFinishedPulling="2025-11-25 15:40:56.741773578 +0000 UTC m=+1417.796182060" observedRunningTime="2025-11-25 15:40:57.904830784 +0000 UTC m=+1418.959239266" watchObservedRunningTime="2025-11-25 15:40:57.910979275 +0000 UTC m=+1418.965387747" Nov 25 15:40:57 crc kubenswrapper[4800]: I1125 15:40:57.927996 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.308578605 podStartE2EDuration="6.927974988s" podCreationTimestamp="2025-11-25 15:40:51 +0000 UTC" firstStartedPulling="2025-11-25 15:40:53.122036736 +0000 UTC m=+1414.176445218" lastFinishedPulling="2025-11-25 15:40:56.741433119 +0000 UTC m=+1417.795841601" observedRunningTime="2025-11-25 15:40:57.925441817 +0000 UTC m=+1418.979850309" watchObservedRunningTime="2025-11-25 15:40:57.927974988 +0000 UTC m=+1418.982383470" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.441487 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.568928 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1944c12b-e7c0-4c00-ab74-1c735daf2eef-logs\") pod \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.569103 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-combined-ca-bundle\") pod \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.569206 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2z8w\" (UniqueName: \"kubernetes.io/projected/1944c12b-e7c0-4c00-ab74-1c735daf2eef-kube-api-access-d2z8w\") pod \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.569238 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-config-data\") pod \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\" (UID: \"1944c12b-e7c0-4c00-ab74-1c735daf2eef\") " Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.569493 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1944c12b-e7c0-4c00-ab74-1c735daf2eef-logs" (OuterVolumeSpecName: "logs") pod "1944c12b-e7c0-4c00-ab74-1c735daf2eef" (UID: "1944c12b-e7c0-4c00-ab74-1c735daf2eef"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.569826 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1944c12b-e7c0-4c00-ab74-1c735daf2eef-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.576504 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1944c12b-e7c0-4c00-ab74-1c735daf2eef-kube-api-access-d2z8w" (OuterVolumeSpecName: "kube-api-access-d2z8w") pod "1944c12b-e7c0-4c00-ab74-1c735daf2eef" (UID: "1944c12b-e7c0-4c00-ab74-1c735daf2eef"). InnerVolumeSpecName "kube-api-access-d2z8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.599684 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-config-data" (OuterVolumeSpecName: "config-data") pod "1944c12b-e7c0-4c00-ab74-1c735daf2eef" (UID: "1944c12b-e7c0-4c00-ab74-1c735daf2eef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.620979 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1944c12b-e7c0-4c00-ab74-1c735daf2eef" (UID: "1944c12b-e7c0-4c00-ab74-1c735daf2eef"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.671660 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.672041 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2z8w\" (UniqueName: \"kubernetes.io/projected/1944c12b-e7c0-4c00-ab74-1c735daf2eef-kube-api-access-d2z8w\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.672161 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1944c12b-e7c0-4c00-ab74-1c735daf2eef-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.864722 4800 generic.go:334] "Generic (PLEG): container finished" podID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerID="f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d" exitCode=0 Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.866056 4800 generic.go:334] "Generic (PLEG): container finished" podID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerID="24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61" exitCode=143 Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.867442 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.868321 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1944c12b-e7c0-4c00-ab74-1c735daf2eef","Type":"ContainerDied","Data":"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d"} Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.868408 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1944c12b-e7c0-4c00-ab74-1c735daf2eef","Type":"ContainerDied","Data":"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61"} Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.868430 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1944c12b-e7c0-4c00-ab74-1c735daf2eef","Type":"ContainerDied","Data":"06317b4d2f967533bee6e3aebf89e41a6f9361df7b6513a82834ea4f47e813eb"} Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.868450 4800 scope.go:117] "RemoveContainer" containerID="f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.930938 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.944107 4800 scope.go:117] "RemoveContainer" containerID="24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.961737 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.976616 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:40:58 crc kubenswrapper[4800]: E1125 15:40:58.977199 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-metadata" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.977252 4800 
state_mem.go:107] "Deleted CPUSet assignment" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-metadata" Nov 25 15:40:58 crc kubenswrapper[4800]: E1125 15:40:58.977291 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-log" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.977301 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-log" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.977519 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-log" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.977552 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" containerName="nova-metadata-metadata" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.978897 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.984305 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 15:40:58 crc kubenswrapper[4800]: I1125 15:40:58.993027 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.010191 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.018714 4800 scope.go:117] "RemoveContainer" containerID="f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d" Nov 25 15:40:59 crc kubenswrapper[4800]: E1125 15:40:59.020778 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d\": container with ID starting with f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d not found: ID does not exist" containerID="f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.020827 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d"} err="failed to get container status \"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d\": rpc error: code = NotFound desc = could not find container \"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d\": container with ID starting with f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d not found: ID does not exist" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.020877 4800 scope.go:117] "RemoveContainer" containerID="24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61" Nov 25 15:40:59 crc kubenswrapper[4800]: E1125 15:40:59.023812 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61\": container with ID starting with 24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61 not found: ID does not exist" containerID="24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 
15:40:59.023915 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61"} err="failed to get container status \"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61\": rpc error: code = NotFound desc = could not find container \"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61\": container with ID starting with 24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61 not found: ID does not exist" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.023989 4800 scope.go:117] "RemoveContainer" containerID="f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.025021 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d"} err="failed to get container status \"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d\": rpc error: code = NotFound desc = could not find container \"f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d\": container with ID starting with f33c09c7d6e1a3555bdfec1654cf61cc8ac0655669b6c6d5245f1d8ee3d1c41d not found: ID does not exist" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.025081 4800 scope.go:117] "RemoveContainer" containerID="24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.025941 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61"} err="failed to get container status \"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61\": rpc error: code = NotFound desc = could not find container \"24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61\": container with ID starting with 24d8b9372d95b5e389af9ec39dd8cf46fb6fdc03ce63c79cab684b877af4bb61 not found: ID does not exist" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.085606 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h9rv\" (UniqueName: \"kubernetes.io/projected/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-kube-api-access-2h9rv\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.085691 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.085897 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.085953 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-logs\") pod \"nova-metadata-0\" 
(UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.085991 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-config-data\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.188333 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-config-data\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.188411 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h9rv\" (UniqueName: \"kubernetes.io/projected/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-kube-api-access-2h9rv\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.188456 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.188522 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.188574 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-logs\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.189136 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-logs\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.194331 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-config-data\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.194628 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.207228 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.209616 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h9rv\" (UniqueName: \"kubernetes.io/projected/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-kube-api-access-2h9rv\") pod \"nova-metadata-0\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.375507 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.811776 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1944c12b-e7c0-4c00-ab74-1c735daf2eef" path="/var/lib/kubelet/pods/1944c12b-e7c0-4c00-ab74-1c735daf2eef/volumes" Nov 25 15:40:59 crc kubenswrapper[4800]: I1125 15:40:59.961523 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:00 crc kubenswrapper[4800]: I1125 15:41:00.900382 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa","Type":"ContainerStarted","Data":"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f"} Nov 25 15:41:00 crc kubenswrapper[4800]: I1125 15:41:00.900652 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa","Type":"ContainerStarted","Data":"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4"} Nov 25 15:41:00 crc kubenswrapper[4800]: I1125 15:41:00.900663 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa","Type":"ContainerStarted","Data":"86698617e333a65a39e92b419c0d01d264d127028fd803001757f369eb2287c0"} Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.177664 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.178341 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.207328 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.232489 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.260803 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.260779368 podStartE2EDuration="4.260779368s" podCreationTimestamp="2025-11-25 15:40:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:00.927999731 +0000 UTC m=+1421.982408263" watchObservedRunningTime="2025-11-25 15:41:02.260779368 +0000 UTC m=+1423.315187850" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.422973 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.423070 4800 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.496983 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.611194 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bc89f58d7-cghtv"] Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.611467 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" podUID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerName="dnsmasq-dns" containerID="cri-o://57bb35409c39aa24e351375feedc485afcd89e79fa3cf6997144be05bb40d00e" gracePeriod=10 Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.929592 4800 generic.go:334] "Generic (PLEG): container finished" podID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerID="57bb35409c39aa24e351375feedc485afcd89e79fa3cf6997144be05bb40d00e" exitCode=0 Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.929721 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" event={"ID":"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0","Type":"ContainerDied","Data":"57bb35409c39aa24e351375feedc485afcd89e79fa3cf6997144be05bb40d00e"} Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.932694 4800 generic.go:334] "Generic (PLEG): container finished" podID="3f050231-ed56-42ad-aeb8-4a79ed9ed00f" containerID="65fdf81506c1f3b0f9184d604c33756baf4d866f3d20d76d05c3e1ea701c8c41" exitCode=0 Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.932742 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" event={"ID":"3f050231-ed56-42ad-aeb8-4a79ed9ed00f","Type":"ContainerDied","Data":"65fdf81506c1f3b0f9184d604c33756baf4d866f3d20d76d05c3e1ea701c8c41"} Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.936034 4800 generic.go:334] "Generic (PLEG): container finished" podID="a1268195-54b6-4a47-bcb7-eb573bb91209" containerID="5e340500223746d502aa5fe0ab3187adbb7f252d3127a9b39dac6998f5009185" exitCode=0 Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.937463 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-84lxn" event={"ID":"a1268195-54b6-4a47-bcb7-eb573bb91209","Type":"ContainerDied","Data":"5e340500223746d502aa5fe0ab3187adbb7f252d3127a9b39dac6998f5009185"} Nov 25 15:41:02 crc kubenswrapper[4800]: I1125 15:41:02.993677 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.206345 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.290298 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-nb\") pod \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.290428 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-dns-svc\") pod \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.290586 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-sb\") pod \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.290619 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-config\") pod \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.290700 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g6vl\" (UniqueName: \"kubernetes.io/projected/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-kube-api-access-9g6vl\") pod \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\" (UID: \"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0\") " Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.300200 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-kube-api-access-9g6vl" (OuterVolumeSpecName: "kube-api-access-9g6vl") pod "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" (UID: "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0"). InnerVolumeSpecName "kube-api-access-9g6vl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.374597 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" (UID: "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.394935 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g6vl\" (UniqueName: \"kubernetes.io/projected/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-kube-api-access-9g6vl\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.394975 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.407095 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" (UID: "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.408220 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-config" (OuterVolumeSpecName: "config") pod "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" (UID: "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.429573 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" (UID: "7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.496827 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.496892 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.496910 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.506088 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.173:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.506478 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.173:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.950466 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" event={"ID":"7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0","Type":"ContainerDied","Data":"053867d2a57ce3ea9b551af3336bc647c8af959ed3c36779cf29819427664b9a"} Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.950564 4800 scope.go:117] "RemoveContainer" containerID="57bb35409c39aa24e351375feedc485afcd89e79fa3cf6997144be05bb40d00e" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.950764 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bc89f58d7-cghtv" Nov 25 15:41:03 crc kubenswrapper[4800]: I1125 15:41:03.993409 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bc89f58d7-cghtv"] Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.001719 4800 scope.go:117] "RemoveContainer" containerID="5ad192cd906564c07f024661ac15d47dcd815d439033125822237908eb3746c0" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.012413 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bc89f58d7-cghtv"] Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.378963 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.379118 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.444995 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.452296 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-84lxn" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.527957 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-combined-ca-bundle\") pod \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.528039 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-config-data\") pod \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.528106 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-config-data\") pod \"a1268195-54b6-4a47-bcb7-eb573bb91209\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.528211 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-scripts\") pod \"a1268195-54b6-4a47-bcb7-eb573bb91209\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.528239 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrg54\" (UniqueName: \"kubernetes.io/projected/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-kube-api-access-mrg54\") pod \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.528263 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-scripts\") pod \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\" (UID: \"3f050231-ed56-42ad-aeb8-4a79ed9ed00f\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.528517 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-combined-ca-bundle\") pod \"a1268195-54b6-4a47-bcb7-eb573bb91209\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.528591 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw59z\" (UniqueName: \"kubernetes.io/projected/a1268195-54b6-4a47-bcb7-eb573bb91209-kube-api-access-gw59z\") pod \"a1268195-54b6-4a47-bcb7-eb573bb91209\" (UID: \"a1268195-54b6-4a47-bcb7-eb573bb91209\") " Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.537683 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-kube-api-access-mrg54" (OuterVolumeSpecName: "kube-api-access-mrg54") pod "3f050231-ed56-42ad-aeb8-4a79ed9ed00f" (UID: "3f050231-ed56-42ad-aeb8-4a79ed9ed00f"). InnerVolumeSpecName "kube-api-access-mrg54". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.543187 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1268195-54b6-4a47-bcb7-eb573bb91209-kube-api-access-gw59z" (OuterVolumeSpecName: "kube-api-access-gw59z") pod "a1268195-54b6-4a47-bcb7-eb573bb91209" (UID: "a1268195-54b6-4a47-bcb7-eb573bb91209"). InnerVolumeSpecName "kube-api-access-gw59z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.549202 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-scripts" (OuterVolumeSpecName: "scripts") pod "3f050231-ed56-42ad-aeb8-4a79ed9ed00f" (UID: "3f050231-ed56-42ad-aeb8-4a79ed9ed00f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.562328 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-scripts" (OuterVolumeSpecName: "scripts") pod "a1268195-54b6-4a47-bcb7-eb573bb91209" (UID: "a1268195-54b6-4a47-bcb7-eb573bb91209"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.575768 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-config-data" (OuterVolumeSpecName: "config-data") pod "a1268195-54b6-4a47-bcb7-eb573bb91209" (UID: "a1268195-54b6-4a47-bcb7-eb573bb91209"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.577306 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f050231-ed56-42ad-aeb8-4a79ed9ed00f" (UID: "3f050231-ed56-42ad-aeb8-4a79ed9ed00f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.601980 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-config-data" (OuterVolumeSpecName: "config-data") pod "3f050231-ed56-42ad-aeb8-4a79ed9ed00f" (UID: "3f050231-ed56-42ad-aeb8-4a79ed9ed00f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.608675 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1268195-54b6-4a47-bcb7-eb573bb91209" (UID: "a1268195-54b6-4a47-bcb7-eb573bb91209"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635019 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635067 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrg54\" (UniqueName: \"kubernetes.io/projected/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-kube-api-access-mrg54\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635083 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635095 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635105 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw59z\" (UniqueName: \"kubernetes.io/projected/a1268195-54b6-4a47-bcb7-eb573bb91209-kube-api-access-gw59z\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635114 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635126 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f050231-ed56-42ad-aeb8-4a79ed9ed00f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.635135 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1268195-54b6-4a47-bcb7-eb573bb91209-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.779687 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.969725 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" event={"ID":"3f050231-ed56-42ad-aeb8-4a79ed9ed00f","Type":"ContainerDied","Data":"c47716fa899bb0507ea92aeef16f1f87622faceb9382bd90b904ec9fd5059c7d"} Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.969792 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2d7j" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.969813 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c47716fa899bb0507ea92aeef16f1f87622faceb9382bd90b904ec9fd5059c7d" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.975441 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-84lxn" event={"ID":"a1268195-54b6-4a47-bcb7-eb573bb91209","Type":"ContainerDied","Data":"096f7f5688d8b8a2c0d0fbb71c295e050bb0835cae33cbd8d070162c4e182513"} Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.976143 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="096f7f5688d8b8a2c0d0fbb71c295e050bb0835cae33cbd8d070162c4e182513" Nov 25 15:41:04 crc kubenswrapper[4800]: I1125 15:41:04.976417 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-84lxn" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067154 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 15:41:05 crc kubenswrapper[4800]: E1125 15:41:05.067587 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1268195-54b6-4a47-bcb7-eb573bb91209" containerName="nova-manage" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067614 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1268195-54b6-4a47-bcb7-eb573bb91209" containerName="nova-manage" Nov 25 15:41:05 crc kubenswrapper[4800]: E1125 15:41:05.067632 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerName="dnsmasq-dns" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067640 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerName="dnsmasq-dns" Nov 25 15:41:05 crc kubenswrapper[4800]: E1125 15:41:05.067670 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f050231-ed56-42ad-aeb8-4a79ed9ed00f" containerName="nova-cell1-conductor-db-sync" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067676 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f050231-ed56-42ad-aeb8-4a79ed9ed00f" containerName="nova-cell1-conductor-db-sync" Nov 25 15:41:05 crc kubenswrapper[4800]: E1125 15:41:05.067691 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerName="init" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067698 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerName="init" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067905 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f050231-ed56-42ad-aeb8-4a79ed9ed00f" containerName="nova-cell1-conductor-db-sync" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067923 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" containerName="dnsmasq-dns" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.067944 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1268195-54b6-4a47-bcb7-eb573bb91209" containerName="nova-manage" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.068595 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.073668 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.082938 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.145368 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj4m7\" (UniqueName: \"kubernetes.io/projected/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-kube-api-access-mj4m7\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.145445 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.145498 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.220990 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.221312 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-log" containerID="cri-o://7677d067723edc8673c57f40b29b4a8d1c846b0a612636c75f84a303f7819a61" gracePeriod=30 Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.223264 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-api" containerID="cri-o://985dfce5809b4c785edba2049bebecb72e0789c1d7cb14bce8ffddccd5aeb0a5" gracePeriod=30 Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.247915 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.248168 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a720b7c6-a205-4da7-bdb2-98e8ac932d17" containerName="nova-scheduler-scheduler" containerID="cri-o://0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" gracePeriod=30 Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.251825 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.252425 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj4m7\" (UniqueName: 
\"kubernetes.io/projected/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-kube-api-access-mj4m7\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.252634 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.256790 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.260257 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.269920 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.275996 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj4m7\" (UniqueName: \"kubernetes.io/projected/3046bf0c-3466-4a9a-9c78-84b1b5f8d164-kube-api-access-mj4m7\") pod \"nova-cell1-conductor-0\" (UID: \"3046bf0c-3466-4a9a-9c78-84b1b5f8d164\") " pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.385065 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.802214 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0" path="/var/lib/kubelet/pods/7daed4fc-3a0c-420e-8cdc-79fcfd66f8c0/volumes" Nov 25 15:41:05 crc kubenswrapper[4800]: I1125 15:41:05.948555 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 15:41:05 crc kubenswrapper[4800]: W1125 15:41:05.952961 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3046bf0c_3466_4a9a_9c78_84b1b5f8d164.slice/crio-39e92538c26376f65700bdf6097d5f14064a43088e6bdfaf5a9775f24f854db2 WatchSource:0}: Error finding container 39e92538c26376f65700bdf6097d5f14064a43088e6bdfaf5a9775f24f854db2: Status 404 returned error can't find the container with id 39e92538c26376f65700bdf6097d5f14064a43088e6bdfaf5a9775f24f854db2 Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.005529 4800 generic.go:334] "Generic (PLEG): container finished" podID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerID="7677d067723edc8673c57f40b29b4a8d1c846b0a612636c75f84a303f7819a61" exitCode=143 Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.005636 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"033cc45c-99b1-4199-aee4-13218fbd5f32","Type":"ContainerDied","Data":"7677d067723edc8673c57f40b29b4a8d1c846b0a612636c75f84a303f7819a61"} Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.006874 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3046bf0c-3466-4a9a-9c78-84b1b5f8d164","Type":"ContainerStarted","Data":"39e92538c26376f65700bdf6097d5f14064a43088e6bdfaf5a9775f24f854db2"} Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.007010 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-log" containerID="cri-o://6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4" gracePeriod=30 Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.007144 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-metadata" containerID="cri-o://aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f" gracePeriod=30 Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.691670 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.794334 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2h9rv\" (UniqueName: \"kubernetes.io/projected/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-kube-api-access-2h9rv\") pod \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.794383 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-logs\") pod \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.794568 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-nova-metadata-tls-certs\") pod \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.794596 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-config-data\") pod \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.794628 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-combined-ca-bundle\") pod \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\" (UID: \"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa\") " Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.795114 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-logs" (OuterVolumeSpecName: "logs") pod "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" (UID: "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.803474 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-kube-api-access-2h9rv" (OuterVolumeSpecName: "kube-api-access-2h9rv") pod "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" (UID: "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa"). InnerVolumeSpecName "kube-api-access-2h9rv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.829937 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" (UID: "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.832297 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-config-data" (OuterVolumeSpecName: "config-data") pod "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" (UID: "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.861876 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" (UID: "19feeacf-01d3-4552-bc0d-9dcdcd59d0fa"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.896929 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2h9rv\" (UniqueName: \"kubernetes.io/projected/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-kube-api-access-2h9rv\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.896976 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.896991 4800 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.897003 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:06 crc kubenswrapper[4800]: I1125 15:41:06.897015 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.019599 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3046bf0c-3466-4a9a-9c78-84b1b5f8d164","Type":"ContainerStarted","Data":"f239f87e96523db61f1f2581c2472116ca6dab5ad92fc4596ad1ca4a12813381"} Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.020534 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.023493 4800 generic.go:334] "Generic (PLEG): container finished" podID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerID="aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f" exitCode=0 Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.023581 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa","Type":"ContainerDied","Data":"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f"} Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.023688 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa","Type":"ContainerDied","Data":"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4"} Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.023716 4800 scope.go:117] "RemoveContainer" containerID="aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.023647 4800 generic.go:334] "Generic (PLEG): container finished" podID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" 
containerID="6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4" exitCode=143 Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.023923 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"19feeacf-01d3-4552-bc0d-9dcdcd59d0fa","Type":"ContainerDied","Data":"86698617e333a65a39e92b419c0d01d264d127028fd803001757f369eb2287c0"} Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.023571 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.044664 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.044645502 podStartE2EDuration="2.044645502s" podCreationTimestamp="2025-11-25 15:41:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:07.03904106 +0000 UTC m=+1428.093449552" watchObservedRunningTime="2025-11-25 15:41:07.044645502 +0000 UTC m=+1428.099053984" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.057583 4800 scope.go:117] "RemoveContainer" containerID="6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.062984 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.070951 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.087636 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.088475 4800 scope.go:117] "RemoveContainer" containerID="aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f" Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.088791 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-metadata" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.088907 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-metadata" Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.089022 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-log" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.089101 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-log" Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.089111 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f\": container with ID starting with aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f not found: ID does not exist" containerID="aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.089281 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f"} err="failed to get container status \"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f\": 
rpc error: code = NotFound desc = could not find container \"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f\": container with ID starting with aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f not found: ID does not exist" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.089312 4800 scope.go:117] "RemoveContainer" containerID="6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.089660 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-log" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.089787 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" containerName="nova-metadata-metadata" Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.089852 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4\": container with ID starting with 6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4 not found: ID does not exist" containerID="6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.089990 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4"} err="failed to get container status \"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4\": rpc error: code = NotFound desc = could not find container \"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4\": container with ID starting with 6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4 not found: ID does not exist" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.090030 4800 scope.go:117] "RemoveContainer" containerID="aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.091362 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.093314 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f"} err="failed to get container status \"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f\": rpc error: code = NotFound desc = could not find container \"aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f\": container with ID starting with aa7488aa6e76dbbd6cf6f7eb68ce0f092cdff46e8cef990d0c43786ee89ece2f not found: ID does not exist" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.093388 4800 scope.go:117] "RemoveContainer" containerID="6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.094286 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4"} err="failed to get container status \"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4\": rpc error: code = NotFound desc = could not find container \"6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4\": container with ID starting with 6c1fcf99da2c65b1cc4f5e655ae5d1637c1023c7f8348b3759bf0f966c1481a4 not found: ID does not exist" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.094495 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.095385 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.107043 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.179689 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.181905 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.183977 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 15:41:07 crc kubenswrapper[4800]: E1125 15:41:07.184059 4800 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="a720b7c6-a205-4da7-bdb2-98e8ac932d17" containerName="nova-scheduler-scheduler" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.211044 4800 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-config-data\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.211190 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx7rf\" (UniqueName: \"kubernetes.io/projected/02cf6678-62f5-447b-bc73-32acc218e062-kube-api-access-wx7rf\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.211252 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.211280 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.211374 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02cf6678-62f5-447b-bc73-32acc218e062-logs\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.313029 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02cf6678-62f5-447b-bc73-32acc218e062-logs\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.313135 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-config-data\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.313227 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx7rf\" (UniqueName: \"kubernetes.io/projected/02cf6678-62f5-447b-bc73-32acc218e062-kube-api-access-wx7rf\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.313270 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.313303 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.313730 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02cf6678-62f5-447b-bc73-32acc218e062-logs\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.320958 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-config-data\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.321181 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.332922 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.339457 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx7rf\" (UniqueName: \"kubernetes.io/projected/02cf6678-62f5-447b-bc73-32acc218e062-kube-api-access-wx7rf\") pod \"nova-metadata-0\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.445260 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.805963 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19feeacf-01d3-4552-bc0d-9dcdcd59d0fa" path="/var/lib/kubelet/pods/19feeacf-01d3-4552-bc0d-9dcdcd59d0fa/volumes" Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.830300 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.976600 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:41:07 crc kubenswrapper[4800]: I1125 15:41:07.978236 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="65654af1-0a54-4d42-b45b-bae47243b055" containerName="kube-state-metrics" containerID="cri-o://d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680" gracePeriod=30 Nov 25 15:41:08 crc kubenswrapper[4800]: I1125 15:41:08.034332 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02cf6678-62f5-447b-bc73-32acc218e062","Type":"ContainerStarted","Data":"effde8d1fc8287eba223d6d9c7d1c24993954a907caf6ecca4acda2fb0f4b2ff"} Nov 25 15:41:08 crc kubenswrapper[4800]: I1125 15:41:08.376933 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 15:41:08 crc kubenswrapper[4800]: I1125 15:41:08.436178 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br7bm\" (UniqueName: \"kubernetes.io/projected/65654af1-0a54-4d42-b45b-bae47243b055-kube-api-access-br7bm\") pod \"65654af1-0a54-4d42-b45b-bae47243b055\" (UID: \"65654af1-0a54-4d42-b45b-bae47243b055\") " Nov 25 15:41:08 crc kubenswrapper[4800]: I1125 15:41:08.444288 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65654af1-0a54-4d42-b45b-bae47243b055-kube-api-access-br7bm" (OuterVolumeSpecName: "kube-api-access-br7bm") pod "65654af1-0a54-4d42-b45b-bae47243b055" (UID: "65654af1-0a54-4d42-b45b-bae47243b055"). InnerVolumeSpecName "kube-api-access-br7bm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:08 crc kubenswrapper[4800]: I1125 15:41:08.538961 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br7bm\" (UniqueName: \"kubernetes.io/projected/65654af1-0a54-4d42-b45b-bae47243b055-kube-api-access-br7bm\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.049473 4800 generic.go:334] "Generic (PLEG): container finished" podID="65654af1-0a54-4d42-b45b-bae47243b055" containerID="d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680" exitCode=2 Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.049550 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.049574 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"65654af1-0a54-4d42-b45b-bae47243b055","Type":"ContainerDied","Data":"d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680"} Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.050150 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"65654af1-0a54-4d42-b45b-bae47243b055","Type":"ContainerDied","Data":"b027011e502f7e82f252aceffb505d6b560cb690e6618bfc10142ad74dd1c911"} Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.050176 4800 scope.go:117] "RemoveContainer" containerID="d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.053528 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02cf6678-62f5-447b-bc73-32acc218e062","Type":"ContainerStarted","Data":"42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043"} Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.053574 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02cf6678-62f5-447b-bc73-32acc218e062","Type":"ContainerStarted","Data":"405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7"} Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.075254 4800 scope.go:117] "RemoveContainer" containerID="d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680" Nov 25 15:41:09 crc kubenswrapper[4800]: E1125 15:41:09.075568 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680\": container with ID starting with d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680 not found: ID 
does not exist" containerID="d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.075605 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680"} err="failed to get container status \"d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680\": rpc error: code = NotFound desc = could not find container \"d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680\": container with ID starting with d498d8809a3bc69fa25fae22d5b73136b93e24662eb896db3be14fcd7ea3f680 not found: ID does not exist" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.084291 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.084272667 podStartE2EDuration="2.084272667s" podCreationTimestamp="2025-11-25 15:41:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:09.078622324 +0000 UTC m=+1430.133030806" watchObservedRunningTime="2025-11-25 15:41:09.084272667 +0000 UTC m=+1430.138681149" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.116607 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.116988 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-central-agent" containerID="cri-o://9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9" gracePeriod=30 Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.117484 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="proxy-httpd" containerID="cri-o://8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183" gracePeriod=30 Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.117538 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="sg-core" containerID="cri-o://f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd" gracePeriod=30 Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.117582 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-notification-agent" containerID="cri-o://1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25" gracePeriod=30 Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.132541 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.148479 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.178894 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:41:09 crc kubenswrapper[4800]: E1125 15:41:09.179380 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65654af1-0a54-4d42-b45b-bae47243b055" containerName="kube-state-metrics" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.179396 4800 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="65654af1-0a54-4d42-b45b-bae47243b055" containerName="kube-state-metrics" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.179595 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="65654af1-0a54-4d42-b45b-bae47243b055" containerName="kube-state-metrics" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.180280 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.186266 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.186276 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.192224 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.255188 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4fv2\" (UniqueName: \"kubernetes.io/projected/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-api-access-z4fv2\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.255256 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.255340 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.255426 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.357012 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.357115 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.357184 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-z4fv2\" (UniqueName: \"kubernetes.io/projected/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-api-access-z4fv2\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.357203 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.364932 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.365020 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.371873 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.396254 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4fv2\" (UniqueName: \"kubernetes.io/projected/26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a-kube-api-access-z4fv2\") pod \"kube-state-metrics-0\" (UID: \"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a\") " pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.506185 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.798568 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65654af1-0a54-4d42-b45b-bae47243b055" path="/var/lib/kubelet/pods/65654af1-0a54-4d42-b45b-bae47243b055/volumes" Nov 25 15:41:09 crc kubenswrapper[4800]: I1125 15:41:09.932072 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.074991 4800 generic.go:334] "Generic (PLEG): container finished" podID="27fd4305-1e43-4444-9fa5-7ac870390999" containerID="8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183" exitCode=0 Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.075029 4800 generic.go:334] "Generic (PLEG): container finished" podID="27fd4305-1e43-4444-9fa5-7ac870390999" containerID="f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd" exitCode=2 Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.075038 4800 generic.go:334] "Generic (PLEG): container finished" podID="27fd4305-1e43-4444-9fa5-7ac870390999" containerID="9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9" exitCode=0 Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.075103 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerDied","Data":"8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183"} Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.075135 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerDied","Data":"f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd"} Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.075149 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerDied","Data":"9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9"} Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.078678 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvmc6\" (UniqueName: \"kubernetes.io/projected/a720b7c6-a205-4da7-bdb2-98e8ac932d17-kube-api-access-bvmc6\") pod \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.080125 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-config-data\") pod \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.080197 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-combined-ca-bundle\") pod \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\" (UID: \"a720b7c6-a205-4da7-bdb2-98e8ac932d17\") " Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.079219 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"033cc45c-99b1-4199-aee4-13218fbd5f32","Type":"ContainerDied","Data":"985dfce5809b4c785edba2049bebecb72e0789c1d7cb14bce8ffddccd5aeb0a5"} Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.079187 4800 generic.go:334] "Generic (PLEG): container finished" podID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerID="985dfce5809b4c785edba2049bebecb72e0789c1d7cb14bce8ffddccd5aeb0a5" exitCode=0 Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.087982 4800 generic.go:334] "Generic (PLEG): container finished" podID="a720b7c6-a205-4da7-bdb2-98e8ac932d17" 
containerID="0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" exitCode=0 Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.098665 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.098933 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a720b7c6-a205-4da7-bdb2-98e8ac932d17","Type":"ContainerDied","Data":"0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75"} Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.099022 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a720b7c6-a205-4da7-bdb2-98e8ac932d17","Type":"ContainerDied","Data":"9f00be0beb00672856bb03b27af497cfdc0be056a956a9fce329196a7befee7a"} Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.099059 4800 scope.go:117] "RemoveContainer" containerID="0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.146386 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-config-data" (OuterVolumeSpecName: "config-data") pod "a720b7c6-a205-4da7-bdb2-98e8ac932d17" (UID: "a720b7c6-a205-4da7-bdb2-98e8ac932d17"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.150486 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a720b7c6-a205-4da7-bdb2-98e8ac932d17" (UID: "a720b7c6-a205-4da7-bdb2-98e8ac932d17"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.158893 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a720b7c6-a205-4da7-bdb2-98e8ac932d17-kube-api-access-bvmc6" (OuterVolumeSpecName: "kube-api-access-bvmc6") pod "a720b7c6-a205-4da7-bdb2-98e8ac932d17" (UID: "a720b7c6-a205-4da7-bdb2-98e8ac932d17"). InnerVolumeSpecName "kube-api-access-bvmc6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.187798 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvmc6\" (UniqueName: \"kubernetes.io/projected/a720b7c6-a205-4da7-bdb2-98e8ac932d17-kube-api-access-bvmc6\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.187856 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.187866 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a720b7c6-a205-4da7-bdb2-98e8ac932d17-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.203386 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 15:41:10 crc kubenswrapper[4800]: W1125 15:41:10.211884 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod26eed0ca_ff5b_4fd3_9e2c_8a57ed553e2a.slice/crio-718ee88b4e79eef7a1a454442ebb80202f579bcf494977d8b43b0eacfaa3b76e WatchSource:0}: Error finding container 718ee88b4e79eef7a1a454442ebb80202f579bcf494977d8b43b0eacfaa3b76e: Status 404 returned error can't find the container with id 718ee88b4e79eef7a1a454442ebb80202f579bcf494977d8b43b0eacfaa3b76e Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.291822 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.309389 4800 scope.go:117] "RemoveContainer" containerID="0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" Nov 25 15:41:10 crc kubenswrapper[4800]: E1125 15:41:10.309797 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75\": container with ID starting with 0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75 not found: ID does not exist" containerID="0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.309830 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75"} err="failed to get container status \"0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75\": rpc error: code = NotFound desc = could not find container \"0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75\": container with ID starting with 0e64af29524258013f004ae6d5a72e0d483a0af22259d97546ea4beeb0281d75 not found: ID does not exist" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.392026 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-config-data\") pod \"033cc45c-99b1-4199-aee4-13218fbd5f32\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.392152 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ghrd\" (UniqueName: 
\"kubernetes.io/projected/033cc45c-99b1-4199-aee4-13218fbd5f32-kube-api-access-5ghrd\") pod \"033cc45c-99b1-4199-aee4-13218fbd5f32\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.392198 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/033cc45c-99b1-4199-aee4-13218fbd5f32-logs\") pod \"033cc45c-99b1-4199-aee4-13218fbd5f32\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.392371 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-combined-ca-bundle\") pod \"033cc45c-99b1-4199-aee4-13218fbd5f32\" (UID: \"033cc45c-99b1-4199-aee4-13218fbd5f32\") " Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.393217 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/033cc45c-99b1-4199-aee4-13218fbd5f32-logs" (OuterVolumeSpecName: "logs") pod "033cc45c-99b1-4199-aee4-13218fbd5f32" (UID: "033cc45c-99b1-4199-aee4-13218fbd5f32"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.398611 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/033cc45c-99b1-4199-aee4-13218fbd5f32-kube-api-access-5ghrd" (OuterVolumeSpecName: "kube-api-access-5ghrd") pod "033cc45c-99b1-4199-aee4-13218fbd5f32" (UID: "033cc45c-99b1-4199-aee4-13218fbd5f32"). InnerVolumeSpecName "kube-api-access-5ghrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.420742 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-config-data" (OuterVolumeSpecName: "config-data") pod "033cc45c-99b1-4199-aee4-13218fbd5f32" (UID: "033cc45c-99b1-4199-aee4-13218fbd5f32"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.433489 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "033cc45c-99b1-4199-aee4-13218fbd5f32" (UID: "033cc45c-99b1-4199-aee4-13218fbd5f32"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.453376 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.471586 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.485259 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:10 crc kubenswrapper[4800]: E1125 15:41:10.485682 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a720b7c6-a205-4da7-bdb2-98e8ac932d17" containerName="nova-scheduler-scheduler" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.485701 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a720b7c6-a205-4da7-bdb2-98e8ac932d17" containerName="nova-scheduler-scheduler" Nov 25 15:41:10 crc kubenswrapper[4800]: E1125 15:41:10.485734 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-log" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.485740 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-log" Nov 25 15:41:10 crc kubenswrapper[4800]: E1125 15:41:10.485753 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-api" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.485760 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-api" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.485958 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a720b7c6-a205-4da7-bdb2-98e8ac932d17" containerName="nova-scheduler-scheduler" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.485979 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-api" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.486000 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" containerName="nova-api-log" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.486643 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.490368 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.498539 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.498568 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ghrd\" (UniqueName: \"kubernetes.io/projected/033cc45c-99b1-4199-aee4-13218fbd5f32-kube-api-access-5ghrd\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.498577 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/033cc45c-99b1-4199-aee4-13218fbd5f32-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.498587 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cc45c-99b1-4199-aee4-13218fbd5f32-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.506142 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.600585 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.600951 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-config-data\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.601139 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrhpk\" (UniqueName: \"kubernetes.io/projected/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-kube-api-access-nrhpk\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.703508 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-config-data\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.703872 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrhpk\" (UniqueName: \"kubernetes.io/projected/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-kube-api-access-nrhpk\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.704047 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.707913 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-config-data\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.707950 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.722744 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrhpk\" (UniqueName: \"kubernetes.io/projected/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-kube-api-access-nrhpk\") pod \"nova-scheduler-0\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:10 crc kubenswrapper[4800]: I1125 15:41:10.806744 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.100699 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a","Type":"ContainerStarted","Data":"d4f26a157d352a74d2f1fde16125732de6dbd9761dfca9b25a155069284c6bcb"} Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.101168 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a","Type":"ContainerStarted","Data":"718ee88b4e79eef7a1a454442ebb80202f579bcf494977d8b43b0eacfaa3b76e"} Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.102131 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.105724 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"033cc45c-99b1-4199-aee4-13218fbd5f32","Type":"ContainerDied","Data":"ce0fcfaa23bcdcad6aeec98e4ee14cce9c9eb0d25321a698632363f99883ed66"} Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.105764 4800 scope.go:117] "RemoveContainer" containerID="985dfce5809b4c785edba2049bebecb72e0789c1d7cb14bce8ffddccd5aeb0a5" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.105858 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.149956 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.772882113 podStartE2EDuration="2.149929884s" podCreationTimestamp="2025-11-25 15:41:09 +0000 UTC" firstStartedPulling="2025-11-25 15:41:10.219469263 +0000 UTC m=+1431.273877745" lastFinishedPulling="2025-11-25 15:41:10.596517034 +0000 UTC m=+1431.650925516" observedRunningTime="2025-11-25 15:41:11.140959462 +0000 UTC m=+1432.195367954" watchObservedRunningTime="2025-11-25 15:41:11.149929884 +0000 UTC m=+1432.204338366" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.176860 4800 scope.go:117] "RemoveContainer" containerID="7677d067723edc8673c57f40b29b4a8d1c846b0a612636c75f84a303f7819a61" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.178882 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.202668 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.222924 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.225342 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.228498 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.229459 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.309020 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.348889 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-config-data\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.349351 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.349451 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91a204b-3779-4299-a6c0-1b4fa3c89045-logs\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.349700 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8h2k\" (UniqueName: \"kubernetes.io/projected/c91a204b-3779-4299-a6c0-1b4fa3c89045-kube-api-access-d8h2k\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.452559 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-config-data\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.452701 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.452748 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91a204b-3779-4299-a6c0-1b4fa3c89045-logs\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.452797 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8h2k\" (UniqueName: \"kubernetes.io/projected/c91a204b-3779-4299-a6c0-1b4fa3c89045-kube-api-access-d8h2k\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.453715 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91a204b-3779-4299-a6c0-1b4fa3c89045-logs\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.458862 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-config-data\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.459580 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.471757 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8h2k\" (UniqueName: \"kubernetes.io/projected/c91a204b-3779-4299-a6c0-1b4fa3c89045-kube-api-access-d8h2k\") pod \"nova-api-0\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.560514 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.808176 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="033cc45c-99b1-4199-aee4-13218fbd5f32" path="/var/lib/kubelet/pods/033cc45c-99b1-4199-aee4-13218fbd5f32/volumes" Nov 25 15:41:11 crc kubenswrapper[4800]: I1125 15:41:11.809703 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a720b7c6-a205-4da7-bdb2-98e8ac932d17" path="/var/lib/kubelet/pods/a720b7c6-a205-4da7-bdb2-98e8ac932d17/volumes" Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.042110 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:12 crc kubenswrapper[4800]: W1125 15:41:12.048041 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc91a204b_3779_4299_a6c0_1b4fa3c89045.slice/crio-acfed66152acc723c08da5f75b12dfb111e2cf9554159af6e19e613f90333afb WatchSource:0}: Error finding container acfed66152acc723c08da5f75b12dfb111e2cf9554159af6e19e613f90333afb: Status 404 returned error can't find the container with id acfed66152acc723c08da5f75b12dfb111e2cf9554159af6e19e613f90333afb Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.139677 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca","Type":"ContainerStarted","Data":"47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8"} Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.139736 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca","Type":"ContainerStarted","Data":"02a28ba54d6b99f0aa8626c73c28ea89826f7faf5f8ab617a69d99150de4926b"} Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.153621 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c91a204b-3779-4299-a6c0-1b4fa3c89045","Type":"ContainerStarted","Data":"acfed66152acc723c08da5f75b12dfb111e2cf9554159af6e19e613f90333afb"} Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.181437 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.181406657 podStartE2EDuration="2.181406657s" podCreationTimestamp="2025-11-25 15:41:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:12.170305037 +0000 UTC m=+1433.224713519" watchObservedRunningTime="2025-11-25 15:41:12.181406657 +0000 UTC m=+1433.235815149" Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.445930 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.446353 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.639661 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:41:12 crc kubenswrapper[4800]: I1125 15:41:12.639717 4800 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.177284 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c91a204b-3779-4299-a6c0-1b4fa3c89045","Type":"ContainerStarted","Data":"2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14"} Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.177692 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c91a204b-3779-4299-a6c0-1b4fa3c89045","Type":"ContainerStarted","Data":"2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895"} Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.202988 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.202961181 podStartE2EDuration="2.202961181s" podCreationTimestamp="2025-11-25 15:41:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:13.195316114 +0000 UTC m=+1434.249724596" watchObservedRunningTime="2025-11-25 15:41:13.202961181 +0000 UTC m=+1434.257369663" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.578480 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.703747 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-combined-ca-bundle\") pod \"27fd4305-1e43-4444-9fa5-7ac870390999\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.703973 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-log-httpd\") pod \"27fd4305-1e43-4444-9fa5-7ac870390999\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704006 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-scripts\") pod \"27fd4305-1e43-4444-9fa5-7ac870390999\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704060 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-run-httpd\") pod \"27fd4305-1e43-4444-9fa5-7ac870390999\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704090 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-sg-core-conf-yaml\") pod \"27fd4305-1e43-4444-9fa5-7ac870390999\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704151 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-config-data\") pod \"27fd4305-1e43-4444-9fa5-7ac870390999\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704179 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdmb6\" (UniqueName: \"kubernetes.io/projected/27fd4305-1e43-4444-9fa5-7ac870390999-kube-api-access-fdmb6\") pod \"27fd4305-1e43-4444-9fa5-7ac870390999\" (UID: \"27fd4305-1e43-4444-9fa5-7ac870390999\") " Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704406 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "27fd4305-1e43-4444-9fa5-7ac870390999" (UID: "27fd4305-1e43-4444-9fa5-7ac870390999"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704588 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.704891 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "27fd4305-1e43-4444-9fa5-7ac870390999" (UID: "27fd4305-1e43-4444-9fa5-7ac870390999"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.710115 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-scripts" (OuterVolumeSpecName: "scripts") pod "27fd4305-1e43-4444-9fa5-7ac870390999" (UID: "27fd4305-1e43-4444-9fa5-7ac870390999"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.711065 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27fd4305-1e43-4444-9fa5-7ac870390999-kube-api-access-fdmb6" (OuterVolumeSpecName: "kube-api-access-fdmb6") pod "27fd4305-1e43-4444-9fa5-7ac870390999" (UID: "27fd4305-1e43-4444-9fa5-7ac870390999"). InnerVolumeSpecName "kube-api-access-fdmb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.763989 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "27fd4305-1e43-4444-9fa5-7ac870390999" (UID: "27fd4305-1e43-4444-9fa5-7ac870390999"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.799759 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27fd4305-1e43-4444-9fa5-7ac870390999" (UID: "27fd4305-1e43-4444-9fa5-7ac870390999"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.806262 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.806311 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27fd4305-1e43-4444-9fa5-7ac870390999-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.806326 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.806342 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdmb6\" (UniqueName: \"kubernetes.io/projected/27fd4305-1e43-4444-9fa5-7ac870390999-kube-api-access-fdmb6\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.806355 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.839468 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-config-data" (OuterVolumeSpecName: "config-data") pod "27fd4305-1e43-4444-9fa5-7ac870390999" (UID: "27fd4305-1e43-4444-9fa5-7ac870390999"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:13 crc kubenswrapper[4800]: I1125 15:41:13.907983 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27fd4305-1e43-4444-9fa5-7ac870390999-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.186971 4800 generic.go:334] "Generic (PLEG): container finished" podID="27fd4305-1e43-4444-9fa5-7ac870390999" containerID="1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25" exitCode=0 Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.187104 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.187192 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerDied","Data":"1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25"} Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.187228 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27fd4305-1e43-4444-9fa5-7ac870390999","Type":"ContainerDied","Data":"12258d9bb9db40ab17165c45271a8e96d7d7a4f3fe6480dad2e5a1bcc5621bb3"} Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.187269 4800 scope.go:117] "RemoveContainer" containerID="8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.211693 4800 scope.go:117] "RemoveContainer" containerID="f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.230370 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.240100 4800 scope.go:117] "RemoveContainer" containerID="1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.240944 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.265880 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.267179 4800 scope.go:117] "RemoveContainer" containerID="9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.272044 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-notification-agent" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272086 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-notification-agent" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.272108 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="proxy-httpd" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272120 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="proxy-httpd" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.272143 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-central-agent" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272153 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-central-agent" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.272179 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="sg-core" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272188 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="sg-core" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272432 4800 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-notification-agent" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272452 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="proxy-httpd" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272478 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="ceilometer-central-agent" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.272494 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" containerName="sg-core" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.274745 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.282080 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.282472 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.282634 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.286106 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.289687 4800 scope.go:117] "RemoveContainer" containerID="8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.290102 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183\": container with ID starting with 8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183 not found: ID does not exist" containerID="8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.290139 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183"} err="failed to get container status \"8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183\": rpc error: code = NotFound desc = could not find container \"8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183\": container with ID starting with 8c683b1bed06272ddda23ca7c046e7e365272f0d0bd8314d46952df7b9c99183 not found: ID does not exist" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.290165 4800 scope.go:117] "RemoveContainer" containerID="f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.290462 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd\": container with ID starting with f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd not found: ID does not exist" containerID="f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.290483 4800 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd"} err="failed to get container status \"f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd\": rpc error: code = NotFound desc = could not find container \"f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd\": container with ID starting with f7937480f9843293b8bb26010eec5ccd965dd16fd3f02917742cb343a20551bd not found: ID does not exist" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.290495 4800 scope.go:117] "RemoveContainer" containerID="1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.290741 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25\": container with ID starting with 1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25 not found: ID does not exist" containerID="1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.290761 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25"} err="failed to get container status \"1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25\": rpc error: code = NotFound desc = could not find container \"1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25\": container with ID starting with 1d2b45c7621cee7285fbbf418b0d3b47786545d452140455d1844a8d757f2a25 not found: ID does not exist" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.290772 4800 scope.go:117] "RemoveContainer" containerID="9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9" Nov 25 15:41:14 crc kubenswrapper[4800]: E1125 15:41:14.291201 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9\": container with ID starting with 9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9 not found: ID does not exist" containerID="9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.291224 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9"} err="failed to get container status \"9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9\": rpc error: code = NotFound desc = could not find container \"9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9\": container with ID starting with 9c7e6497908ea420ee5c0186e7604c2d1c7aec8e15076e29ac4e8554a98f9ea9 not found: ID does not exist" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.416637 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv89g\" (UniqueName: \"kubernetes.io/projected/545e96ca-eabd-4864-b5b4-b3c27825e583-kube-api-access-lv89g\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.416710 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-log-httpd\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.416776 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-config-data\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.417165 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.417202 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.417320 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.417375 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-scripts\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.417447 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-run-httpd\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519247 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-config-data\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519307 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519362 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519404 
4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519438 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-scripts\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519466 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-run-httpd\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519570 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv89g\" (UniqueName: \"kubernetes.io/projected/545e96ca-eabd-4864-b5b4-b3c27825e583-kube-api-access-lv89g\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.519599 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-log-httpd\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.520224 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-log-httpd\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.520333 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-run-httpd\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.525579 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-scripts\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.525673 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.525866 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.525912 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-config-data\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.526349 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.545870 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv89g\" (UniqueName: \"kubernetes.io/projected/545e96ca-eabd-4864-b5b4-b3c27825e583-kube-api-access-lv89g\") pod \"ceilometer-0\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " pod="openstack/ceilometer-0" Nov 25 15:41:14 crc kubenswrapper[4800]: I1125 15:41:14.631863 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:15 crc kubenswrapper[4800]: I1125 15:41:15.097588 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:15 crc kubenswrapper[4800]: I1125 15:41:15.197117 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerStarted","Data":"9457a50fde1b1192bbc2180ad69e378d45d7c445797501e1d1cba0d9ab67e4e8"} Nov 25 15:41:15 crc kubenswrapper[4800]: I1125 15:41:15.418352 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 25 15:41:15 crc kubenswrapper[4800]: I1125 15:41:15.798818 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27fd4305-1e43-4444-9fa5-7ac870390999" path="/var/lib/kubelet/pods/27fd4305-1e43-4444-9fa5-7ac870390999/volumes" Nov 25 15:41:15 crc kubenswrapper[4800]: I1125 15:41:15.807375 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 15:41:16 crc kubenswrapper[4800]: I1125 15:41:16.209317 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerStarted","Data":"46d08c25106198b99388cc8911a0911797fe3e0042bfcd6ece8037d995812b91"} Nov 25 15:41:17 crc kubenswrapper[4800]: I1125 15:41:17.226602 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerStarted","Data":"08b8a92d9bf0741b7ea5c0627db2174ff7140d67bf9d64f4baf62cbf0faf414f"} Nov 25 15:41:17 crc kubenswrapper[4800]: I1125 15:41:17.446354 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 15:41:17 crc kubenswrapper[4800]: I1125 15:41:17.446691 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 15:41:18 crc kubenswrapper[4800]: I1125 15:41:18.244956 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerStarted","Data":"fb0003d0f03cf57a239873879dfea612ab34cb02fb5bb5fca83c197751af760a"} Nov 25 15:41:18 crc kubenswrapper[4800]: I1125 15:41:18.462886 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.179:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 15:41:18 crc kubenswrapper[4800]: I1125 15:41:18.462879 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.179:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 15:41:19 crc kubenswrapper[4800]: I1125 15:41:19.256575 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerStarted","Data":"09f23df7bac7e4fb39baf596b648f2eb5be6c6333f805b6cb7bb5cc8dd16849f"} Nov 25 15:41:19 crc kubenswrapper[4800]: I1125 15:41:19.257060 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 15:41:19 crc kubenswrapper[4800]: I1125 15:41:19.286685 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.959661969 podStartE2EDuration="5.286659262s" podCreationTimestamp="2025-11-25 15:41:14 +0000 UTC" firstStartedPulling="2025-11-25 15:41:15.10217885 +0000 UTC m=+1436.156587332" lastFinishedPulling="2025-11-25 15:41:18.429176143 +0000 UTC m=+1439.483584625" observedRunningTime="2025-11-25 15:41:19.277041213 +0000 UTC m=+1440.331449695" watchObservedRunningTime="2025-11-25 15:41:19.286659262 +0000 UTC m=+1440.341067744" Nov 25 15:41:19 crc kubenswrapper[4800]: I1125 15:41:19.518789 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 15:41:20 crc kubenswrapper[4800]: I1125 15:41:20.807209 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 15:41:20 crc kubenswrapper[4800]: I1125 15:41:20.845269 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 15:41:21 crc kubenswrapper[4800]: I1125 15:41:21.305418 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 15:41:21 crc kubenswrapper[4800]: I1125 15:41:21.560954 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:41:21 crc kubenswrapper[4800]: I1125 15:41:21.561028 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:41:22 crc kubenswrapper[4800]: I1125 15:41:22.643105 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.182:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 15:41:22 crc kubenswrapper[4800]: I1125 15:41:22.644539 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.182:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 15:41:27 crc kubenswrapper[4800]: I1125 15:41:27.453963 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/nova-metadata-0" Nov 25 15:41:27 crc kubenswrapper[4800]: I1125 15:41:27.460806 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 15:41:27 crc kubenswrapper[4800]: I1125 15:41:27.460966 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 15:41:28 crc kubenswrapper[4800]: I1125 15:41:28.343701 4800 generic.go:334] "Generic (PLEG): container finished" podID="c9b37821-9578-4c63-b27a-194684167a87" containerID="8b7c3a999bd3d784ab71760c6c3973147ac2b3b18484568022240afe2d978487" exitCode=137 Nov 25 15:41:28 crc kubenswrapper[4800]: I1125 15:41:28.343776 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c9b37821-9578-4c63-b27a-194684167a87","Type":"ContainerDied","Data":"8b7c3a999bd3d784ab71760c6c3973147ac2b3b18484568022240afe2d978487"} Nov 25 15:41:28 crc kubenswrapper[4800]: I1125 15:41:28.355011 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 15:41:28 crc kubenswrapper[4800]: I1125 15:41:28.922255 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.090905 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fcjt\" (UniqueName: \"kubernetes.io/projected/c9b37821-9578-4c63-b27a-194684167a87-kube-api-access-4fcjt\") pod \"c9b37821-9578-4c63-b27a-194684167a87\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.091431 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-config-data\") pod \"c9b37821-9578-4c63-b27a-194684167a87\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.092110 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-combined-ca-bundle\") pod \"c9b37821-9578-4c63-b27a-194684167a87\" (UID: \"c9b37821-9578-4c63-b27a-194684167a87\") " Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.101737 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9b37821-9578-4c63-b27a-194684167a87-kube-api-access-4fcjt" (OuterVolumeSpecName: "kube-api-access-4fcjt") pod "c9b37821-9578-4c63-b27a-194684167a87" (UID: "c9b37821-9578-4c63-b27a-194684167a87"). InnerVolumeSpecName "kube-api-access-4fcjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.132740 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9b37821-9578-4c63-b27a-194684167a87" (UID: "c9b37821-9578-4c63-b27a-194684167a87"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.139097 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-config-data" (OuterVolumeSpecName: "config-data") pod "c9b37821-9578-4c63-b27a-194684167a87" (UID: "c9b37821-9578-4c63-b27a-194684167a87"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.194639 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fcjt\" (UniqueName: \"kubernetes.io/projected/c9b37821-9578-4c63-b27a-194684167a87-kube-api-access-4fcjt\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.194810 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.194820 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9b37821-9578-4c63-b27a-194684167a87-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.356683 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c9b37821-9578-4c63-b27a-194684167a87","Type":"ContainerDied","Data":"b107c621a2a9ba1aab6700b28a7c82577d35cb68114c6977ee4ea846ff71f864"} Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.356697 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.356747 4800 scope.go:117] "RemoveContainer" containerID="8b7c3a999bd3d784ab71760c6c3973147ac2b3b18484568022240afe2d978487" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.408422 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.429484 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.440501 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 15:41:29 crc kubenswrapper[4800]: E1125 15:41:29.441256 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9b37821-9578-4c63-b27a-194684167a87" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.441304 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9b37821-9578-4c63-b27a-194684167a87" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.441677 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9b37821-9578-4c63-b27a-194684167a87" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.442986 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.450325 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.451290 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.451644 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.462831 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.500094 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.500140 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.500191 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2qfd\" (UniqueName: \"kubernetes.io/projected/418ea34b-91e7-4bed-852c-2856c9c414d1-kube-api-access-q2qfd\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.500288 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.500316 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.602426 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.602482 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.602583 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2qfd\" (UniqueName: \"kubernetes.io/projected/418ea34b-91e7-4bed-852c-2856c9c414d1-kube-api-access-q2qfd\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.603439 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.603487 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.606784 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.607332 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.607583 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.608795 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/418ea34b-91e7-4bed-852c-2856c9c414d1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.622377 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2qfd\" (UniqueName: \"kubernetes.io/projected/418ea34b-91e7-4bed-852c-2856c9c414d1-kube-api-access-q2qfd\") pod \"nova-cell1-novncproxy-0\" (UID: \"418ea34b-91e7-4bed-852c-2856c9c414d1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.775323 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:29 crc kubenswrapper[4800]: I1125 15:41:29.797803 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9b37821-9578-4c63-b27a-194684167a87" path="/var/lib/kubelet/pods/c9b37821-9578-4c63-b27a-194684167a87/volumes" Nov 25 15:41:30 crc kubenswrapper[4800]: W1125 15:41:30.207857 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod418ea34b_91e7_4bed_852c_2856c9c414d1.slice/crio-b4d7de63501a24b248de566ff7b2913daa17a5627b7fcccecc3ec11d0c3f6a04 WatchSource:0}: Error finding container b4d7de63501a24b248de566ff7b2913daa17a5627b7fcccecc3ec11d0c3f6a04: Status 404 returned error can't find the container with id b4d7de63501a24b248de566ff7b2913daa17a5627b7fcccecc3ec11d0c3f6a04 Nov 25 15:41:30 crc kubenswrapper[4800]: I1125 15:41:30.210020 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 15:41:30 crc kubenswrapper[4800]: I1125 15:41:30.369664 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"418ea34b-91e7-4bed-852c-2856c9c414d1","Type":"ContainerStarted","Data":"b4d7de63501a24b248de566ff7b2913daa17a5627b7fcccecc3ec11d0c3f6a04"} Nov 25 15:41:31 crc kubenswrapper[4800]: I1125 15:41:31.380664 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"418ea34b-91e7-4bed-852c-2856c9c414d1","Type":"ContainerStarted","Data":"c9ce02a36c29b4787fb18638f9e1efe99e5b80473e8f46670e9dcbe51406c18c"} Nov 25 15:41:31 crc kubenswrapper[4800]: I1125 15:41:31.405605 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.405579596 podStartE2EDuration="2.405579596s" podCreationTimestamp="2025-11-25 15:41:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:31.400715394 +0000 UTC m=+1452.455123876" watchObservedRunningTime="2025-11-25 15:41:31.405579596 +0000 UTC m=+1452.459988078" Nov 25 15:41:31 crc kubenswrapper[4800]: I1125 15:41:31.576952 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 15:41:31 crc kubenswrapper[4800]: I1125 15:41:31.578219 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 15:41:31 crc kubenswrapper[4800]: I1125 15:41:31.583758 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 15:41:31 crc kubenswrapper[4800]: I1125 15:41:31.586827 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.394022 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.399388 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.621626 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f95c456cf-gvqr6"] Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.623677 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.637584 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f95c456cf-gvqr6"] Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.765406 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-config\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.765509 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-sb\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.765543 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-dns-svc\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.765680 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn48b\" (UniqueName: \"kubernetes.io/projected/bcd422c7-527d-4fe3-802f-72656b4bf034-kube-api-access-nn48b\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.765754 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-nb\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.867788 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn48b\" (UniqueName: \"kubernetes.io/projected/bcd422c7-527d-4fe3-802f-72656b4bf034-kube-api-access-nn48b\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.868420 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-nb\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.868647 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-config\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.868919 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-sb\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.869110 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-dns-svc\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.869997 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-nb\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.870107 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-dns-svc\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.870349 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-config\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.870387 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-sb\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.892744 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn48b\" (UniqueName: \"kubernetes.io/projected/bcd422c7-527d-4fe3-802f-72656b4bf034-kube-api-access-nn48b\") pod \"dnsmasq-dns-f95c456cf-gvqr6\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:32 crc kubenswrapper[4800]: I1125 15:41:32.946654 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:33 crc kubenswrapper[4800]: I1125 15:41:33.596859 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f95c456cf-gvqr6"] Nov 25 15:41:33 crc kubenswrapper[4800]: W1125 15:41:33.598469 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbcd422c7_527d_4fe3_802f_72656b4bf034.slice/crio-69970a0e994d6cbb29566556688f5ec2bd0f7b5407d1bce3ea8c3b690abca725 WatchSource:0}: Error finding container 69970a0e994d6cbb29566556688f5ec2bd0f7b5407d1bce3ea8c3b690abca725: Status 404 returned error can't find the container with id 69970a0e994d6cbb29566556688f5ec2bd0f7b5407d1bce3ea8c3b690abca725 Nov 25 15:41:34 crc kubenswrapper[4800]: I1125 15:41:34.426228 4800 generic.go:334] "Generic (PLEG): container finished" podID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerID="63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957" exitCode=0 Nov 25 15:41:34 crc kubenswrapper[4800]: I1125 15:41:34.426337 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" event={"ID":"bcd422c7-527d-4fe3-802f-72656b4bf034","Type":"ContainerDied","Data":"63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957"} Nov 25 15:41:34 crc kubenswrapper[4800]: I1125 15:41:34.426596 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" event={"ID":"bcd422c7-527d-4fe3-802f-72656b4bf034","Type":"ContainerStarted","Data":"69970a0e994d6cbb29566556688f5ec2bd0f7b5407d1bce3ea8c3b690abca725"} Nov 25 15:41:34 crc kubenswrapper[4800]: I1125 15:41:34.776198 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.007945 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.008475 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-central-agent" containerID="cri-o://46d08c25106198b99388cc8911a0911797fe3e0042bfcd6ece8037d995812b91" gracePeriod=30 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.008600 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="proxy-httpd" containerID="cri-o://09f23df7bac7e4fb39baf596b648f2eb5be6c6333f805b6cb7bb5cc8dd16849f" gracePeriod=30 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.008692 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-notification-agent" containerID="cri-o://08b8a92d9bf0741b7ea5c0627db2174ff7140d67bf9d64f4baf62cbf0faf414f" gracePeriod=30 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.008833 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="sg-core" containerID="cri-o://fb0003d0f03cf57a239873879dfea612ab34cb02fb5bb5fca83c197751af760a" gracePeriod=30 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.025200 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" 
podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.183:3000/\": EOF" Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.439638 4800 generic.go:334] "Generic (PLEG): container finished" podID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerID="09f23df7bac7e4fb39baf596b648f2eb5be6c6333f805b6cb7bb5cc8dd16849f" exitCode=0 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.439671 4800 generic.go:334] "Generic (PLEG): container finished" podID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerID="fb0003d0f03cf57a239873879dfea612ab34cb02fb5bb5fca83c197751af760a" exitCode=2 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.439679 4800 generic.go:334] "Generic (PLEG): container finished" podID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerID="46d08c25106198b99388cc8911a0911797fe3e0042bfcd6ece8037d995812b91" exitCode=0 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.439755 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerDied","Data":"09f23df7bac7e4fb39baf596b648f2eb5be6c6333f805b6cb7bb5cc8dd16849f"} Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.439784 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerDied","Data":"fb0003d0f03cf57a239873879dfea612ab34cb02fb5bb5fca83c197751af760a"} Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.439794 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerDied","Data":"46d08c25106198b99388cc8911a0911797fe3e0042bfcd6ece8037d995812b91"} Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.442041 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" event={"ID":"bcd422c7-527d-4fe3-802f-72656b4bf034","Type":"ContainerStarted","Data":"12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6"} Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.442309 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.468876 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" podStartSLOduration=3.468835162 podStartE2EDuration="3.468835162s" podCreationTimestamp="2025-11-25 15:41:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:35.461205425 +0000 UTC m=+1456.515613907" watchObservedRunningTime="2025-11-25 15:41:35.468835162 +0000 UTC m=+1456.523243644" Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.717259 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.717537 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-log" containerID="cri-o://2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895" gracePeriod=30 Nov 25 15:41:35 crc kubenswrapper[4800]: I1125 15:41:35.717694 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" 
podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-api" containerID="cri-o://2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14" gracePeriod=30 Nov 25 15:41:36 crc kubenswrapper[4800]: I1125 15:41:36.453716 4800 generic.go:334] "Generic (PLEG): container finished" podID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerID="2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895" exitCode=143 Nov 25 15:41:36 crc kubenswrapper[4800]: I1125 15:41:36.453807 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c91a204b-3779-4299-a6c0-1b4fa3c89045","Type":"ContainerDied","Data":"2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895"} Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.480340 4800 generic.go:334] "Generic (PLEG): container finished" podID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerID="08b8a92d9bf0741b7ea5c0627db2174ff7140d67bf9d64f4baf62cbf0faf414f" exitCode=0 Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.480676 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerDied","Data":"08b8a92d9bf0741b7ea5c0627db2174ff7140d67bf9d64f4baf62cbf0faf414f"} Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.671980 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692107 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-sg-core-conf-yaml\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692237 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-log-httpd\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692352 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-combined-ca-bundle\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692425 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-scripts\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692593 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-ceilometer-tls-certs\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692657 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lv89g\" (UniqueName: \"kubernetes.io/projected/545e96ca-eabd-4864-b5b4-b3c27825e583-kube-api-access-lv89g\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: 
\"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692712 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-config-data\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.692807 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-run-httpd\") pod \"545e96ca-eabd-4864-b5b4-b3c27825e583\" (UID: \"545e96ca-eabd-4864-b5b4-b3c27825e583\") " Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.693805 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.694959 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.702076 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/545e96ca-eabd-4864-b5b4-b3c27825e583-kube-api-access-lv89g" (OuterVolumeSpecName: "kube-api-access-lv89g") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "kube-api-access-lv89g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.712072 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-scripts" (OuterVolumeSpecName: "scripts") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.728229 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.763290 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.782046 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.794885 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lv89g\" (UniqueName: \"kubernetes.io/projected/545e96ca-eabd-4864-b5b4-b3c27825e583-kube-api-access-lv89g\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.795601 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.795666 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.795686 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/545e96ca-eabd-4864-b5b4-b3c27825e583-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.795699 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.795713 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.795743 4800 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.810633 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-config-data" (OuterVolumeSpecName: "config-data") pod "545e96ca-eabd-4864-b5b4-b3c27825e583" (UID: "545e96ca-eabd-4864-b5b4-b3c27825e583"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:38 crc kubenswrapper[4800]: I1125 15:41:38.897839 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/545e96ca-eabd-4864-b5b4-b3c27825e583-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.167502 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.206764 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-config-data\") pod \"c91a204b-3779-4299-a6c0-1b4fa3c89045\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.207226 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-combined-ca-bundle\") pod \"c91a204b-3779-4299-a6c0-1b4fa3c89045\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.207272 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91a204b-3779-4299-a6c0-1b4fa3c89045-logs\") pod \"c91a204b-3779-4299-a6c0-1b4fa3c89045\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.207352 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8h2k\" (UniqueName: \"kubernetes.io/projected/c91a204b-3779-4299-a6c0-1b4fa3c89045-kube-api-access-d8h2k\") pod \"c91a204b-3779-4299-a6c0-1b4fa3c89045\" (UID: \"c91a204b-3779-4299-a6c0-1b4fa3c89045\") " Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.207948 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c91a204b-3779-4299-a6c0-1b4fa3c89045-logs" (OuterVolumeSpecName: "logs") pod "c91a204b-3779-4299-a6c0-1b4fa3c89045" (UID: "c91a204b-3779-4299-a6c0-1b4fa3c89045"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.213030 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c91a204b-3779-4299-a6c0-1b4fa3c89045-kube-api-access-d8h2k" (OuterVolumeSpecName: "kube-api-access-d8h2k") pod "c91a204b-3779-4299-a6c0-1b4fa3c89045" (UID: "c91a204b-3779-4299-a6c0-1b4fa3c89045"). InnerVolumeSpecName "kube-api-access-d8h2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.235215 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-config-data" (OuterVolumeSpecName: "config-data") pod "c91a204b-3779-4299-a6c0-1b4fa3c89045" (UID: "c91a204b-3779-4299-a6c0-1b4fa3c89045"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.250462 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c91a204b-3779-4299-a6c0-1b4fa3c89045" (UID: "c91a204b-3779-4299-a6c0-1b4fa3c89045"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.309246 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8h2k\" (UniqueName: \"kubernetes.io/projected/c91a204b-3779-4299-a6c0-1b4fa3c89045-kube-api-access-d8h2k\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.309302 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.309316 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91a204b-3779-4299-a6c0-1b4fa3c89045-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.309329 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91a204b-3779-4299-a6c0-1b4fa3c89045-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.491855 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"545e96ca-eabd-4864-b5b4-b3c27825e583","Type":"ContainerDied","Data":"9457a50fde1b1192bbc2180ad69e378d45d7c445797501e1d1cba0d9ab67e4e8"} Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.491919 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.491941 4800 scope.go:117] "RemoveContainer" containerID="09f23df7bac7e4fb39baf596b648f2eb5be6c6333f805b6cb7bb5cc8dd16849f" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.494891 4800 generic.go:334] "Generic (PLEG): container finished" podID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerID="2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14" exitCode=0 Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.494937 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c91a204b-3779-4299-a6c0-1b4fa3c89045","Type":"ContainerDied","Data":"2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14"} Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.494959 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.494972 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c91a204b-3779-4299-a6c0-1b4fa3c89045","Type":"ContainerDied","Data":"acfed66152acc723c08da5f75b12dfb111e2cf9554159af6e19e613f90333afb"} Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.529260 4800 scope.go:117] "RemoveContainer" containerID="fb0003d0f03cf57a239873879dfea612ab34cb02fb5bb5fca83c197751af760a" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.533227 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.552011 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.578008 4800 scope.go:117] "RemoveContainer" containerID="08b8a92d9bf0741b7ea5c0627db2174ff7140d67bf9d64f4baf62cbf0faf414f" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.581103 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.593487 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628057 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.628555 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-notification-agent" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628578 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-notification-agent" Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.628588 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-log" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628595 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-log" Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.628613 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-central-agent" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628621 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-central-agent" Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.628641 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-api" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628647 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-api" Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.628665 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="proxy-httpd" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628671 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="proxy-httpd" Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.628684 4800 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="sg-core" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628692 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="sg-core" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628874 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="sg-core" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628889 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-api" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628900 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-central-agent" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628907 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="proxy-httpd" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628923 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" containerName="nova-api-log" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.628939 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" containerName="ceilometer-notification-agent" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.630748 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.632990 4800 scope.go:117] "RemoveContainer" containerID="46d08c25106198b99388cc8911a0911797fe3e0042bfcd6ece8037d995812b91" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.633686 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.633727 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.634779 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.637018 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.639175 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.641766 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.642038 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.642086 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.645524 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.662393 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.677240 4800 scope.go:117] "RemoveContainer" containerID="2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.695183 4800 scope.go:117] "RemoveContainer" containerID="2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.712736 4800 scope.go:117] "RemoveContainer" containerID="2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14" Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.713359 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14\": container with ID starting with 2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14 not found: ID does not exist" containerID="2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.713415 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14"} err="failed to get container status \"2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14\": rpc error: code = NotFound desc = could not find container \"2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14\": container with ID starting with 2dbcc488c0d47b655707c198132e11dec29d707835109039c9a3d3986c107d14 not found: ID does not exist" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.713443 4800 scope.go:117] "RemoveContainer" containerID="2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895" Nov 25 15:41:39 crc kubenswrapper[4800]: E1125 15:41:39.713832 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895\": container with ID starting with 2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895 not found: ID does not exist" containerID="2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.713865 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895"} err="failed to get container status \"2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895\": rpc error: code = NotFound desc = could not find container \"2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895\": 
container with ID starting with 2b5635aa1c2881cc60be999ceb599fa9e6d1c387f7831c51a0bff0dd82da3895 not found: ID does not exist" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.776156 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.799547 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="545e96ca-eabd-4864-b5b4-b3c27825e583" path="/var/lib/kubelet/pods/545e96ca-eabd-4864-b5b4-b3c27825e583/volumes" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.800302 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c91a204b-3779-4299-a6c0-1b4fa3c89045" path="/var/lib/kubelet/pods/c91a204b-3779-4299-a6c0-1b4fa3c89045/volumes" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.800929 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.816659 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.817446 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8eb28176-4988-4dcb-a2b7-20443b38db81-logs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.817490 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.817513 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-config-data\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.817612 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818531 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-scripts\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818598 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-log-httpd\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " 
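The operationExecutor.VerifyControllerAttachedVolume entries mark the start of volume setup for the recreated pods; each named volume corresponds to a secret-, projected-, or emptyDir-backed entry in the pod spec. As a hedged sketch with the Kubernetes core/v1 Go types (the secret name matches the log, but the mount path is an assumption, not taken from this cluster):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // A secret-backed volume plus its mount: the shape behind the
        // "kubernetes.io/secret/...-config-data" entries in the log.
        vol := corev1.Volume{
            Name: "config-data",
            VolumeSource: corev1.VolumeSource{
                Secret: &corev1.SecretVolumeSource{SecretName: "nova-api-config-data"},
            },
        }
        // MountPath is illustrative only.
        mount := corev1.VolumeMount{Name: "config-data", MountPath: "/etc/nova", ReadOnly: true}
        fmt.Println(vol.Name, mount.MountPath)
    }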
pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818627 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-run-httpd\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818771 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mqwr\" (UniqueName: \"kubernetes.io/projected/83921bea-cb41-4b95-8e56-4f49cae7cba3-kube-api-access-6mqwr\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818807 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818900 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4stjg\" (UniqueName: \"kubernetes.io/projected/8eb28176-4988-4dcb-a2b7-20443b38db81-kube-api-access-4stjg\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818951 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-config-data\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.818983 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-public-tls-certs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.819009 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921014 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-config-data\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921332 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-public-tls-certs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921356 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921416 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921499 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8eb28176-4988-4dcb-a2b7-20443b38db81-logs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921535 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921555 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-config-data\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921587 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921623 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-scripts\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921651 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-log-httpd\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921670 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-run-httpd\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921701 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mqwr\" (UniqueName: \"kubernetes.io/projected/83921bea-cb41-4b95-8e56-4f49cae7cba3-kube-api-access-6mqwr\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921722 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.921750 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4stjg\" (UniqueName: \"kubernetes.io/projected/8eb28176-4988-4dcb-a2b7-20443b38db81-kube-api-access-4stjg\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.922391 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-log-httpd\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.922455 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8eb28176-4988-4dcb-a2b7-20443b38db81-logs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.922747 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-run-httpd\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.925879 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-public-tls-certs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.925940 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-config-data\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.926239 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.926261 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.927185 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-config-data\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.927366 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-combined-ca-bundle\") pod \"nova-api-0\" 
(UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.929382 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-scripts\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.935147 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.937823 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4stjg\" (UniqueName: \"kubernetes.io/projected/8eb28176-4988-4dcb-a2b7-20443b38db81-kube-api-access-4stjg\") pod \"nova-api-0\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " pod="openstack/nova-api-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.937881 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mqwr\" (UniqueName: \"kubernetes.io/projected/83921bea-cb41-4b95-8e56-4f49cae7cba3-kube-api-access-6mqwr\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.945159 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.962489 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 15:41:39 crc kubenswrapper[4800]: I1125 15:41:39.976613 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.467055 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.481067 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.668294 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerStarted","Data":"aac89edd3df31dcba710a55d212026560abceaf25321cf38167c0747a340138f"} Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.683860 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8eb28176-4988-4dcb-a2b7-20443b38db81","Type":"ContainerStarted","Data":"353c1ff1cc54c15a41d0c17252eeddc0e70f9afdc64f5d729b4ddeb142bf62ad"} Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.701218 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.932755 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-nntxf"] Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.935007 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.938795 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.939386 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 15:41:40 crc kubenswrapper[4800]: I1125 15:41:40.944245 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nntxf"] Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.045511 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.046171 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-scripts\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.046347 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn9h5\" (UniqueName: \"kubernetes.io/projected/33df2ae0-fba8-410b-8d25-4e8951046509-kube-api-access-qn9h5\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.046503 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-config-data\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.147824 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-scripts\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.147920 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn9h5\" (UniqueName: \"kubernetes.io/projected/33df2ae0-fba8-410b-8d25-4e8951046509-kube-api-access-qn9h5\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.147970 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-config-data\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.148034 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.151814 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-config-data\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.156278 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-scripts\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.164123 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.168970 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn9h5\" (UniqueName: \"kubernetes.io/projected/33df2ae0-fba8-410b-8d25-4e8951046509-kube-api-access-qn9h5\") pod \"nova-cell1-cell-mapping-nntxf\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.277325 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.706646 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8eb28176-4988-4dcb-a2b7-20443b38db81","Type":"ContainerStarted","Data":"967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41"} Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.707408 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8eb28176-4988-4dcb-a2b7-20443b38db81","Type":"ContainerStarted","Data":"08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04"} Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.710954 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerStarted","Data":"95a43851290ab3ca21c711171aa3fb4632ba69eebc8052a48e6062109743050d"} Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.740492 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.740463943 podStartE2EDuration="2.740463943s" podCreationTimestamp="2025-11-25 15:41:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:41.732178619 +0000 UTC m=+1462.786587091" watchObservedRunningTime="2025-11-25 15:41:41.740463943 +0000 UTC m=+1462.794872435" Nov 25 15:41:41 crc kubenswrapper[4800]: I1125 15:41:41.757689 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nntxf"] Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.640903 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.641334 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.641397 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.642258 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b8afc7cca40a5009587f2c6768805585b09b1bfca0b79d34753356c624725482"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.642324 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://b8afc7cca40a5009587f2c6768805585b09b1bfca0b79d34753356c624725482" gracePeriod=600 Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.727454 4800 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerStarted","Data":"3448eb5f60230f656f77ca6b19dc952b1bdf03162467550c1a85c9a5f14af140"} Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.729591 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nntxf" event={"ID":"33df2ae0-fba8-410b-8d25-4e8951046509","Type":"ContainerStarted","Data":"97abb9e624feaeaff7994cc5c638718d8e68b4500f6add6435d62c6a932bdcdb"} Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.729651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nntxf" event={"ID":"33df2ae0-fba8-410b-8d25-4e8951046509","Type":"ContainerStarted","Data":"ea1b567604621da5946eac0ee66ab7ae91acd99f7f0a45044b73c0016775c09e"} Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.750019 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-nntxf" podStartSLOduration=2.749999052 podStartE2EDuration="2.749999052s" podCreationTimestamp="2025-11-25 15:41:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:42.746270662 +0000 UTC m=+1463.800679144" watchObservedRunningTime="2025-11-25 15:41:42.749999052 +0000 UTC m=+1463.804407534" Nov 25 15:41:42 crc kubenswrapper[4800]: I1125 15:41:42.957826 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.048590 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f7bbc55bc-hdtvr"] Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.048919 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" podUID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerName="dnsmasq-dns" containerID="cri-o://993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df" gracePeriod=10 Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.489036 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.607448 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-config\") pod \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.607507 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-sb\") pod \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.607617 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-dns-svc\") pod \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.607653 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-nb\") pod \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.607684 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s42hn\" (UniqueName: \"kubernetes.io/projected/45a8dfaa-a31d-4d43-b445-cc559b7420f3-kube-api-access-s42hn\") pod \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\" (UID: \"45a8dfaa-a31d-4d43-b445-cc559b7420f3\") " Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.621290 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45a8dfaa-a31d-4d43-b445-cc559b7420f3-kube-api-access-s42hn" (OuterVolumeSpecName: "kube-api-access-s42hn") pod "45a8dfaa-a31d-4d43-b445-cc559b7420f3" (UID: "45a8dfaa-a31d-4d43-b445-cc559b7420f3"). InnerVolumeSpecName "kube-api-access-s42hn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.657012 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "45a8dfaa-a31d-4d43-b445-cc559b7420f3" (UID: "45a8dfaa-a31d-4d43-b445-cc559b7420f3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.657438 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "45a8dfaa-a31d-4d43-b445-cc559b7420f3" (UID: "45a8dfaa-a31d-4d43-b445-cc559b7420f3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.658349 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "45a8dfaa-a31d-4d43-b445-cc559b7420f3" (UID: "45a8dfaa-a31d-4d43-b445-cc559b7420f3"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.671748 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-config" (OuterVolumeSpecName: "config") pod "45a8dfaa-a31d-4d43-b445-cc559b7420f3" (UID: "45a8dfaa-a31d-4d43-b445-cc559b7420f3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.710629 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.710676 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.710691 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.710704 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45a8dfaa-a31d-4d43-b445-cc559b7420f3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.710733 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s42hn\" (UniqueName: \"kubernetes.io/projected/45a8dfaa-a31d-4d43-b445-cc559b7420f3-kube-api-access-s42hn\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.747327 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerStarted","Data":"4b3d9c7c7d5f4095b55b7aa4ee435bb28afe8abd069a1d5124945ce34593e04d"} Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.762966 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="b8afc7cca40a5009587f2c6768805585b09b1bfca0b79d34753356c624725482" exitCode=0 Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.763085 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"b8afc7cca40a5009587f2c6768805585b09b1bfca0b79d34753356c624725482"} Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.763152 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"} Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.763177 4800 scope.go:117] "RemoveContainer" containerID="d97d2c0b8a05e269074c76cf21138a3aeaeac0cd9bbe1be26dcd5369887e11f6" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.770170 4800 generic.go:334] "Generic (PLEG): container finished" podID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerID="993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df" exitCode=0 Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.771960 4800 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" event={"ID":"45a8dfaa-a31d-4d43-b445-cc559b7420f3","Type":"ContainerDied","Data":"993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df"} Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.772037 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" event={"ID":"45a8dfaa-a31d-4d43-b445-cc559b7420f3","Type":"ContainerDied","Data":"80157c60fae850124d05d2526950e91d7152849efded36a186375e2e23fa94a0"} Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.776187 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f7bbc55bc-hdtvr" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.806879 4800 scope.go:117] "RemoveContainer" containerID="993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.833330 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f7bbc55bc-hdtvr"] Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.834674 4800 scope.go:117] "RemoveContainer" containerID="975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.841752 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f7bbc55bc-hdtvr"] Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.853149 4800 scope.go:117] "RemoveContainer" containerID="993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df" Nov 25 15:41:43 crc kubenswrapper[4800]: E1125 15:41:43.853493 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df\": container with ID starting with 993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df not found: ID does not exist" containerID="993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.853527 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df"} err="failed to get container status \"993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df\": rpc error: code = NotFound desc = could not find container \"993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df\": container with ID starting with 993f40b5a9a33714d3f008d987c4e2bae3961c947afca99556ade8aeb19a33df not found: ID does not exist" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.853550 4800 scope.go:117] "RemoveContainer" containerID="975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd" Nov 25 15:41:43 crc kubenswrapper[4800]: E1125 15:41:43.853780 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd\": container with ID starting with 975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd not found: ID does not exist" containerID="975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd" Nov 25 15:41:43 crc kubenswrapper[4800]: I1125 15:41:43.853800 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd"} err="failed to get container 
status \"975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd\": rpc error: code = NotFound desc = could not find container \"975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd\": container with ID starting with 975244a46350810f89cc531b9b338d81d0de00f8977c272d738e89fdb666cafd not found: ID does not exist" Nov 25 15:41:44 crc kubenswrapper[4800]: I1125 15:41:44.787794 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerStarted","Data":"eee29a325706c6dbd9944099e81e5dcbefbcf4d85e88f1061c3e2c7708f09d8b"} Nov 25 15:41:44 crc kubenswrapper[4800]: I1125 15:41:44.788167 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 15:41:44 crc kubenswrapper[4800]: I1125 15:41:44.819589 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.183841418 podStartE2EDuration="5.819568756s" podCreationTimestamp="2025-11-25 15:41:39 +0000 UTC" firstStartedPulling="2025-11-25 15:41:40.473813614 +0000 UTC m=+1461.528222096" lastFinishedPulling="2025-11-25 15:41:44.109540952 +0000 UTC m=+1465.163949434" observedRunningTime="2025-11-25 15:41:44.815244229 +0000 UTC m=+1465.869652721" watchObservedRunningTime="2025-11-25 15:41:44.819568756 +0000 UTC m=+1465.873977238" Nov 25 15:41:45 crc kubenswrapper[4800]: I1125 15:41:45.798947 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" path="/var/lib/kubelet/pods/45a8dfaa-a31d-4d43-b445-cc559b7420f3/volumes" Nov 25 15:41:47 crc kubenswrapper[4800]: I1125 15:41:47.830494 4800 generic.go:334] "Generic (PLEG): container finished" podID="33df2ae0-fba8-410b-8d25-4e8951046509" containerID="97abb9e624feaeaff7994cc5c638718d8e68b4500f6add6435d62c6a932bdcdb" exitCode=0 Nov 25 15:41:47 crc kubenswrapper[4800]: I1125 15:41:47.830588 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nntxf" event={"ID":"33df2ae0-fba8-410b-8d25-4e8951046509","Type":"ContainerDied","Data":"97abb9e624feaeaff7994cc5c638718d8e68b4500f6add6435d62c6a932bdcdb"} Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.178516 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.357564 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-combined-ca-bundle\") pod \"33df2ae0-fba8-410b-8d25-4e8951046509\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.357753 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-config-data\") pod \"33df2ae0-fba8-410b-8d25-4e8951046509\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.357910 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-scripts\") pod \"33df2ae0-fba8-410b-8d25-4e8951046509\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.358036 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qn9h5\" (UniqueName: \"kubernetes.io/projected/33df2ae0-fba8-410b-8d25-4e8951046509-kube-api-access-qn9h5\") pod \"33df2ae0-fba8-410b-8d25-4e8951046509\" (UID: \"33df2ae0-fba8-410b-8d25-4e8951046509\") " Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.366347 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-scripts" (OuterVolumeSpecName: "scripts") pod "33df2ae0-fba8-410b-8d25-4e8951046509" (UID: "33df2ae0-fba8-410b-8d25-4e8951046509"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.366948 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33df2ae0-fba8-410b-8d25-4e8951046509-kube-api-access-qn9h5" (OuterVolumeSpecName: "kube-api-access-qn9h5") pod "33df2ae0-fba8-410b-8d25-4e8951046509" (UID: "33df2ae0-fba8-410b-8d25-4e8951046509"). InnerVolumeSpecName "kube-api-access-qn9h5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.387068 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33df2ae0-fba8-410b-8d25-4e8951046509" (UID: "33df2ae0-fba8-410b-8d25-4e8951046509"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.410330 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-config-data" (OuterVolumeSpecName: "config-data") pod "33df2ae0-fba8-410b-8d25-4e8951046509" (UID: "33df2ae0-fba8-410b-8d25-4e8951046509"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.459821 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qn9h5\" (UniqueName: \"kubernetes.io/projected/33df2ae0-fba8-410b-8d25-4e8951046509-kube-api-access-qn9h5\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.459887 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.459898 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.459908 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33df2ae0-fba8-410b-8d25-4e8951046509-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.853587 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nntxf" event={"ID":"33df2ae0-fba8-410b-8d25-4e8951046509","Type":"ContainerDied","Data":"ea1b567604621da5946eac0ee66ab7ae91acd99f7f0a45044b73c0016775c09e"} Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.853626 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nntxf" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.853648 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea1b567604621da5946eac0ee66ab7ae91acd99f7f0a45044b73c0016775c09e" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.977235 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:41:49 crc kubenswrapper[4800]: I1125 15:41:49.977561 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.105676 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.124045 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.124330 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" containerName="nova-scheduler-scheduler" containerID="cri-o://47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8" gracePeriod=30 Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.137059 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.137351 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-log" containerID="cri-o://405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7" gracePeriod=30 Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.137417 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="02cf6678-62f5-447b-bc73-32acc218e062" 
containerName="nova-metadata-metadata" containerID="cri-o://42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043" gracePeriod=30 Nov 25 15:41:50 crc kubenswrapper[4800]: E1125 15:41:50.809496 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 15:41:50 crc kubenswrapper[4800]: E1125 15:41:50.810772 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 15:41:50 crc kubenswrapper[4800]: E1125 15:41:50.812028 4800 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 15:41:50 crc kubenswrapper[4800]: E1125 15:41:50.812127 4800 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" containerName="nova-scheduler-scheduler" Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.868704 4800 generic.go:334] "Generic (PLEG): container finished" podID="02cf6678-62f5-447b-bc73-32acc218e062" containerID="405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7" exitCode=143 Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.868959 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-log" containerID="cri-o://08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04" gracePeriod=30 Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.869057 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02cf6678-62f5-447b-bc73-32acc218e062","Type":"ContainerDied","Data":"405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7"} Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.869502 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-api" containerID="cri-o://967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41" gracePeriod=30 Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.874694 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.187:8774/\": EOF" Nov 25 15:41:50 crc kubenswrapper[4800]: I1125 15:41:50.875164 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.187:8774/\": EOF" Nov 25 15:41:51 crc 
kubenswrapper[4800]: I1125 15:41:51.879269 4800 generic.go:334] "Generic (PLEG): container finished" podID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerID="08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04" exitCode=143 Nov 25 15:41:51 crc kubenswrapper[4800]: I1125 15:41:51.879353 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8eb28176-4988-4dcb-a2b7-20443b38db81","Type":"ContainerDied","Data":"08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04"} Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.263422 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.179:8775/\": read tcp 10.217.0.2:50710->10.217.0.179:8775: read: connection reset by peer" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.263466 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.179:8775/\": read tcp 10.217.0.2:50726->10.217.0.179:8775: read: connection reset by peer" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.705328 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.853419 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-combined-ca-bundle\") pod \"02cf6678-62f5-447b-bc73-32acc218e062\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.853585 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02cf6678-62f5-447b-bc73-32acc218e062-logs\") pod \"02cf6678-62f5-447b-bc73-32acc218e062\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.853630 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-nova-metadata-tls-certs\") pod \"02cf6678-62f5-447b-bc73-32acc218e062\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.853682 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-config-data\") pod \"02cf6678-62f5-447b-bc73-32acc218e062\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.853729 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx7rf\" (UniqueName: \"kubernetes.io/projected/02cf6678-62f5-447b-bc73-32acc218e062-kube-api-access-wx7rf\") pod \"02cf6678-62f5-447b-bc73-32acc218e062\" (UID: \"02cf6678-62f5-447b-bc73-32acc218e062\") " Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.854007 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02cf6678-62f5-447b-bc73-32acc218e062-logs" (OuterVolumeSpecName: "logs") pod "02cf6678-62f5-447b-bc73-32acc218e062" (UID: 
"02cf6678-62f5-447b-bc73-32acc218e062"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.854314 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02cf6678-62f5-447b-bc73-32acc218e062-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.871072 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02cf6678-62f5-447b-bc73-32acc218e062-kube-api-access-wx7rf" (OuterVolumeSpecName: "kube-api-access-wx7rf") pod "02cf6678-62f5-447b-bc73-32acc218e062" (UID: "02cf6678-62f5-447b-bc73-32acc218e062"). InnerVolumeSpecName "kube-api-access-wx7rf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.887058 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02cf6678-62f5-447b-bc73-32acc218e062" (UID: "02cf6678-62f5-447b-bc73-32acc218e062"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.887112 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-config-data" (OuterVolumeSpecName: "config-data") pod "02cf6678-62f5-447b-bc73-32acc218e062" (UID: "02cf6678-62f5-447b-bc73-32acc218e062"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.907500 4800 generic.go:334] "Generic (PLEG): container finished" podID="02cf6678-62f5-447b-bc73-32acc218e062" containerID="42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043" exitCode=0 Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.907552 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02cf6678-62f5-447b-bc73-32acc218e062","Type":"ContainerDied","Data":"42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043"} Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.907584 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02cf6678-62f5-447b-bc73-32acc218e062","Type":"ContainerDied","Data":"effde8d1fc8287eba223d6d9c7d1c24993954a907caf6ecca4acda2fb0f4b2ff"} Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.907599 4800 scope.go:117] "RemoveContainer" containerID="42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.907740 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.910623 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "02cf6678-62f5-447b-bc73-32acc218e062" (UID: "02cf6678-62f5-447b-bc73-32acc218e062"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.955997 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.956237 4800 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.956299 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02cf6678-62f5-447b-bc73-32acc218e062-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.956352 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx7rf\" (UniqueName: \"kubernetes.io/projected/02cf6678-62f5-447b-bc73-32acc218e062-kube-api-access-wx7rf\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.964531 4800 scope.go:117] "RemoveContainer" containerID="405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.983548 4800 scope.go:117] "RemoveContainer" containerID="42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043" Nov 25 15:41:53 crc kubenswrapper[4800]: E1125 15:41:53.984061 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043\": container with ID starting with 42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043 not found: ID does not exist" containerID="42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.984175 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043"} err="failed to get container status \"42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043\": rpc error: code = NotFound desc = could not find container \"42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043\": container with ID starting with 42cf491097ebae5c230d12de06383e7faaf64a24468d5e6f4887fc7e2f4d1043 not found: ID does not exist" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.984267 4800 scope.go:117] "RemoveContainer" containerID="405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7" Nov 25 15:41:53 crc kubenswrapper[4800]: E1125 15:41:53.984922 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7\": container with ID starting with 405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7 not found: ID does not exist" containerID="405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7" Nov 25 15:41:53 crc kubenswrapper[4800]: I1125 15:41:53.984992 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7"} err="failed to get container status \"405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7\": rpc 
error: code = NotFound desc = could not find container \"405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7\": container with ID starting with 405b04e1b1bbd4a0c325caaac289bff714e626feea92726a23c8e523288d17e7 not found: ID does not exist" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.243323 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.253697 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268050 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:54 crc kubenswrapper[4800]: E1125 15:41:54.268574 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerName="init" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268591 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerName="init" Nov 25 15:41:54 crc kubenswrapper[4800]: E1125 15:41:54.268633 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-log" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268642 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-log" Nov 25 15:41:54 crc kubenswrapper[4800]: E1125 15:41:54.268658 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-metadata" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268667 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-metadata" Nov 25 15:41:54 crc kubenswrapper[4800]: E1125 15:41:54.268676 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerName="dnsmasq-dns" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268683 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerName="dnsmasq-dns" Nov 25 15:41:54 crc kubenswrapper[4800]: E1125 15:41:54.268694 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33df2ae0-fba8-410b-8d25-4e8951046509" containerName="nova-manage" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268703 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="33df2ae0-fba8-410b-8d25-4e8951046509" containerName="nova-manage" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268935 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-metadata" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268961 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="02cf6678-62f5-447b-bc73-32acc218e062" containerName="nova-metadata-log" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268983 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="33df2ae0-fba8-410b-8d25-4e8951046509" containerName="nova-manage" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.268996 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a8dfaa-a31d-4d43-b445-cc559b7420f3" containerName="dnsmasq-dns" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.270340 4800 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.275440 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.275699 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.280738 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.362752 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.362855 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5x95\" (UniqueName: \"kubernetes.io/projected/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-kube-api-access-f5x95\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.363051 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.363242 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-logs\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.363376 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-config-data\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.465057 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5x95\" (UniqueName: \"kubernetes.io/projected/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-kube-api-access-f5x95\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.465164 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.465205 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-logs\") pod \"nova-metadata-0\" (UID: 
\"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.465245 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-config-data\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.465375 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.465737 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-logs\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.470815 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.471030 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.472177 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-config-data\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.492870 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5x95\" (UniqueName: \"kubernetes.io/projected/9192bcbd-cbd0-4697-b97a-ccbd71fccb54-kube-api-access-f5x95\") pod \"nova-metadata-0\" (UID: \"9192bcbd-cbd0-4697-b97a-ccbd71fccb54\") " pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.618808 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.928957 4800 generic.go:334] "Generic (PLEG): container finished" podID="5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" containerID="47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8" exitCode=0 Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.929040 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca","Type":"ContainerDied","Data":"47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8"} Nov 25 15:41:54 crc kubenswrapper[4800]: I1125 15:41:54.970617 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.083261 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrhpk\" (UniqueName: \"kubernetes.io/projected/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-kube-api-access-nrhpk\") pod \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.083746 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-config-data\") pod \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.084193 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-combined-ca-bundle\") pod \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\" (UID: \"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca\") " Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.089295 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-kube-api-access-nrhpk" (OuterVolumeSpecName: "kube-api-access-nrhpk") pod "5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" (UID: "5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca"). InnerVolumeSpecName "kube-api-access-nrhpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.117393 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-config-data" (OuterVolumeSpecName: "config-data") pod "5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" (UID: "5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.118059 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" (UID: "5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:55 crc kubenswrapper[4800]: W1125 15:41:55.173344 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9192bcbd_cbd0_4697_b97a_ccbd71fccb54.slice/crio-0d92aaf1f57b02034d5710e8dcf7fd44dd57d8f114e336cc567ffab068a5b201 WatchSource:0}: Error finding container 0d92aaf1f57b02034d5710e8dcf7fd44dd57d8f114e336cc567ffab068a5b201: Status 404 returned error can't find the container with id 0d92aaf1f57b02034d5710e8dcf7fd44dd57d8f114e336cc567ffab068a5b201 Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.176199 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.186869 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrhpk\" (UniqueName: \"kubernetes.io/projected/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-kube-api-access-nrhpk\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.186939 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.186951 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.797772 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02cf6678-62f5-447b-bc73-32acc218e062" path="/var/lib/kubelet/pods/02cf6678-62f5-447b-bc73-32acc218e062/volumes" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.944029 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca","Type":"ContainerDied","Data":"02a28ba54d6b99f0aa8626c73c28ea89826f7faf5f8ab617a69d99150de4926b"} Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.944094 4800 scope.go:117] "RemoveContainer" containerID="47db4de92612a77d52246308fd86a7968de42e76cf0e60ed676fe57dd43086d8" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.944519 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.949966 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9192bcbd-cbd0-4697-b97a-ccbd71fccb54","Type":"ContainerStarted","Data":"78eeea8f83439b992964141531fcd1aeef4ebaccca7ccdc5f841864b942c8001"} Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.950013 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9192bcbd-cbd0-4697-b97a-ccbd71fccb54","Type":"ContainerStarted","Data":"0d92aaf1f57b02034d5710e8dcf7fd44dd57d8f114e336cc567ffab068a5b201"} Nov 25 15:41:55 crc kubenswrapper[4800]: I1125 15:41:55.985071 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.001927 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.010455 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:56 crc kubenswrapper[4800]: E1125 15:41:56.010924 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" containerName="nova-scheduler-scheduler" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.010943 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" containerName="nova-scheduler-scheduler" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.011146 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" containerName="nova-scheduler-scheduler" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.011818 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.016513 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.020247 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.103331 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-config-data\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.103374 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.103674 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvxnv\" (UniqueName: \"kubernetes.io/projected/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-kube-api-access-hvxnv\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.206466 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvxnv\" (UniqueName: \"kubernetes.io/projected/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-kube-api-access-hvxnv\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.206650 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-config-data\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.206681 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.211670 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.214646 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-config-data\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.233053 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvxnv\" (UniqueName: 
\"kubernetes.io/projected/6b5fce4f-89c0-47c1-a5c5-c4a86406502d-kube-api-access-hvxnv\") pod \"nova-scheduler-0\" (UID: \"6b5fce4f-89c0-47c1-a5c5-c4a86406502d\") " pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.387245 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.664424 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.772541 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4stjg\" (UniqueName: \"kubernetes.io/projected/8eb28176-4988-4dcb-a2b7-20443b38db81-kube-api-access-4stjg\") pod \"8eb28176-4988-4dcb-a2b7-20443b38db81\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.772619 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-public-tls-certs\") pod \"8eb28176-4988-4dcb-a2b7-20443b38db81\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.772817 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-internal-tls-certs\") pod \"8eb28176-4988-4dcb-a2b7-20443b38db81\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.772890 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-config-data\") pod \"8eb28176-4988-4dcb-a2b7-20443b38db81\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.773119 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8eb28176-4988-4dcb-a2b7-20443b38db81-logs\") pod \"8eb28176-4988-4dcb-a2b7-20443b38db81\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.773290 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-combined-ca-bundle\") pod \"8eb28176-4988-4dcb-a2b7-20443b38db81\" (UID: \"8eb28176-4988-4dcb-a2b7-20443b38db81\") " Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.774154 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8eb28176-4988-4dcb-a2b7-20443b38db81-logs" (OuterVolumeSpecName: "logs") pod "8eb28176-4988-4dcb-a2b7-20443b38db81" (UID: "8eb28176-4988-4dcb-a2b7-20443b38db81"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.774340 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8eb28176-4988-4dcb-a2b7-20443b38db81-logs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.779050 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8eb28176-4988-4dcb-a2b7-20443b38db81-kube-api-access-4stjg" (OuterVolumeSpecName: "kube-api-access-4stjg") pod "8eb28176-4988-4dcb-a2b7-20443b38db81" (UID: "8eb28176-4988-4dcb-a2b7-20443b38db81"). InnerVolumeSpecName "kube-api-access-4stjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.805180 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-config-data" (OuterVolumeSpecName: "config-data") pod "8eb28176-4988-4dcb-a2b7-20443b38db81" (UID: "8eb28176-4988-4dcb-a2b7-20443b38db81"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.807598 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8eb28176-4988-4dcb-a2b7-20443b38db81" (UID: "8eb28176-4988-4dcb-a2b7-20443b38db81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.822085 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8eb28176-4988-4dcb-a2b7-20443b38db81" (UID: "8eb28176-4988-4dcb-a2b7-20443b38db81"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.824159 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8eb28176-4988-4dcb-a2b7-20443b38db81" (UID: "8eb28176-4988-4dcb-a2b7-20443b38db81"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.876236 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.876270 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4stjg\" (UniqueName: \"kubernetes.io/projected/8eb28176-4988-4dcb-a2b7-20443b38db81-kube-api-access-4stjg\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.876284 4800 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.876295 4800 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.876307 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eb28176-4988-4dcb-a2b7-20443b38db81-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.906374 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 15:41:56 crc kubenswrapper[4800]: W1125 15:41:56.909932 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b5fce4f_89c0_47c1_a5c5_c4a86406502d.slice/crio-57df5533ceb9486c6f97a2941a6b8df13aabb93e48e220b527c00e556f851bf9 WatchSource:0}: Error finding container 57df5533ceb9486c6f97a2941a6b8df13aabb93e48e220b527c00e556f851bf9: Status 404 returned error can't find the container with id 57df5533ceb9486c6f97a2941a6b8df13aabb93e48e220b527c00e556f851bf9 Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.966048 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9192bcbd-cbd0-4697-b97a-ccbd71fccb54","Type":"ContainerStarted","Data":"4eca406429e587056a126a5c3a990df93ce3a7284ee7acca576ce4a466f991e4"} Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.969259 4800 generic.go:334] "Generic (PLEG): container finished" podID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerID="967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41" exitCode=0 Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.969316 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8eb28176-4988-4dcb-a2b7-20443b38db81","Type":"ContainerDied","Data":"967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41"} Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.969379 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8eb28176-4988-4dcb-a2b7-20443b38db81","Type":"ContainerDied","Data":"353c1ff1cc54c15a41d0c17252eeddc0e70f9afdc64f5d729b4ddeb142bf62ad"} Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.969381 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.969414 4800 scope.go:117] "RemoveContainer" containerID="967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41" Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.973572 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6b5fce4f-89c0-47c1-a5c5-c4a86406502d","Type":"ContainerStarted","Data":"57df5533ceb9486c6f97a2941a6b8df13aabb93e48e220b527c00e556f851bf9"} Nov 25 15:41:56 crc kubenswrapper[4800]: I1125 15:41:56.987728 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.987707129 podStartE2EDuration="2.987707129s" podCreationTimestamp="2025-11-25 15:41:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:56.984533133 +0000 UTC m=+1478.038941635" watchObservedRunningTime="2025-11-25 15:41:56.987707129 +0000 UTC m=+1478.042115611" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.010410 4800 scope.go:117] "RemoveContainer" containerID="08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.032413 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.043329 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.060603 4800 scope.go:117] "RemoveContainer" containerID="967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41" Nov 25 15:41:57 crc kubenswrapper[4800]: E1125 15:41:57.061249 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41\": container with ID starting with 967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41 not found: ID does not exist" containerID="967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.061280 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41"} err="failed to get container status \"967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41\": rpc error: code = NotFound desc = could not find container \"967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41\": container with ID starting with 967f3987e4414af59e44e20dc748d1f1577ed34a096ea001727d91c276c90f41 not found: ID does not exist" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.061304 4800 scope.go:117] "RemoveContainer" containerID="08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04" Nov 25 15:41:57 crc kubenswrapper[4800]: E1125 15:41:57.061510 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04\": container with ID starting with 08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04 not found: ID does not exist" containerID="08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.061531 4800 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04"} err="failed to get container status \"08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04\": rpc error: code = NotFound desc = could not find container \"08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04\": container with ID starting with 08f4af5146d7727b9a9eae4cd2abe863a9c3afa4151e88d736ac32f893882d04 not found: ID does not exist" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.069441 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:57 crc kubenswrapper[4800]: E1125 15:41:57.070245 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-api" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.070270 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-api" Nov 25 15:41:57 crc kubenswrapper[4800]: E1125 15:41:57.070295 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-log" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.070304 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-log" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.070548 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-log" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.070589 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" containerName="nova-api-api" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.071911 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.082720 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.086007 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.091839 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.111715 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.182215 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-public-tls-certs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.182341 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.182381 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-logs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.182732 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.182798 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-854x7\" (UniqueName: \"kubernetes.io/projected/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-kube-api-access-854x7\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.182837 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-config-data\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.285493 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-public-tls-certs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.285585 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-internal-tls-certs\") pod 
\"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.285623 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-logs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.285747 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.285784 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-854x7\" (UniqueName: \"kubernetes.io/projected/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-kube-api-access-854x7\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.285812 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-config-data\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.286738 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-logs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.290169 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.290374 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-public-tls-certs\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.290546 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.291587 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-config-data\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.303461 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-854x7\" (UniqueName: \"kubernetes.io/projected/30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9-kube-api-access-854x7\") pod \"nova-api-0\" (UID: \"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9\") " 
pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.399707 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.832334 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca" path="/var/lib/kubelet/pods/5c3d0ed8-8d7f-461c-b5a6-d75dfa2471ca/volumes" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.833865 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8eb28176-4988-4dcb-a2b7-20443b38db81" path="/var/lib/kubelet/pods/8eb28176-4988-4dcb-a2b7-20443b38db81/volumes" Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.862925 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.983598 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9","Type":"ContainerStarted","Data":"4e6fc5dd89ca58cbb2eeb8f9d379194987557c035e1311a63ac47917f751793d"} Nov 25 15:41:57 crc kubenswrapper[4800]: I1125 15:41:57.986337 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6b5fce4f-89c0-47c1-a5c5-c4a86406502d","Type":"ContainerStarted","Data":"dcb0e11ff725dfbba9328209ae81849363aac79683985b299b505ab6c813ec26"} Nov 25 15:41:58 crc kubenswrapper[4800]: I1125 15:41:58.012054 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.012033568 podStartE2EDuration="3.012033568s" podCreationTimestamp="2025-11-25 15:41:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:58.006788417 +0000 UTC m=+1479.061196909" watchObservedRunningTime="2025-11-25 15:41:58.012033568 +0000 UTC m=+1479.066442050" Nov 25 15:41:58 crc kubenswrapper[4800]: I1125 15:41:58.996624 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9","Type":"ContainerStarted","Data":"debf673098c42b100c3c5916f5344f9e2fe7dfc531ee24a15cb4e8d1d37a8ddd"} Nov 25 15:41:58 crc kubenswrapper[4800]: I1125 15:41:58.996966 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9","Type":"ContainerStarted","Data":"e76cebb3843ed265e1677cfd3334ba9c09eea91234e8cc9659971f2a1976f82e"} Nov 25 15:41:59 crc kubenswrapper[4800]: I1125 15:41:59.028450 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.028420163 podStartE2EDuration="2.028420163s" podCreationTimestamp="2025-11-25 15:41:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:41:59.014734543 +0000 UTC m=+1480.069143045" watchObservedRunningTime="2025-11-25 15:41:59.028420163 +0000 UTC m=+1480.082828645" Nov 25 15:41:59 crc kubenswrapper[4800]: I1125 15:41:59.619539 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 15:41:59 crc kubenswrapper[4800]: I1125 15:41:59.619616 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 15:42:01 crc kubenswrapper[4800]: I1125 15:42:01.388001 4800 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 15:42:04 crc kubenswrapper[4800]: I1125 15:42:04.619068 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 15:42:04 crc kubenswrapper[4800]: I1125 15:42:04.619439 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 15:42:05 crc kubenswrapper[4800]: I1125 15:42:05.632033 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9192bcbd-cbd0-4697-b97a-ccbd71fccb54" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 15:42:05 crc kubenswrapper[4800]: I1125 15:42:05.632084 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9192bcbd-cbd0-4697-b97a-ccbd71fccb54" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.359699 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v6h4s"] Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.375563 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.378463 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v6h4s"] Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.387582 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.404751 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-catalog-content\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.404809 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5lrq\" (UniqueName: \"kubernetes.io/projected/e2ca4176-bd74-4d88-9577-d11a5bf36108-kube-api-access-l5lrq\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.404932 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-utilities\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.420311 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.506075 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-catalog-content\") pod 
\"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.506137 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5lrq\" (UniqueName: \"kubernetes.io/projected/e2ca4176-bd74-4d88-9577-d11a5bf36108-kube-api-access-l5lrq\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.506409 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-utilities\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.506962 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-utilities\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.507416 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-catalog-content\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.531091 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5lrq\" (UniqueName: \"kubernetes.io/projected/e2ca4176-bd74-4d88-9577-d11a5bf36108-kube-api-access-l5lrq\") pod \"redhat-operators-v6h4s\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:06 crc kubenswrapper[4800]: I1125 15:42:06.710569 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:07 crc kubenswrapper[4800]: I1125 15:42:07.101604 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 15:42:07 crc kubenswrapper[4800]: I1125 15:42:07.178600 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v6h4s"] Nov 25 15:42:07 crc kubenswrapper[4800]: I1125 15:42:07.400175 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:42:07 crc kubenswrapper[4800]: I1125 15:42:07.401957 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 15:42:08 crc kubenswrapper[4800]: I1125 15:42:08.096615 4800 generic.go:334] "Generic (PLEG): container finished" podID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerID="87ec5827a4d95165b78ac50a6956853f93af05165f999522016f4709922767e3" exitCode=0 Nov 25 15:42:08 crc kubenswrapper[4800]: I1125 15:42:08.098168 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6h4s" event={"ID":"e2ca4176-bd74-4d88-9577-d11a5bf36108","Type":"ContainerDied","Data":"87ec5827a4d95165b78ac50a6956853f93af05165f999522016f4709922767e3"} Nov 25 15:42:08 crc kubenswrapper[4800]: I1125 15:42:08.098351 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6h4s" event={"ID":"e2ca4176-bd74-4d88-9577-d11a5bf36108","Type":"ContainerStarted","Data":"82f89f865541246435e348c4032d89119732a58ebba26d3d4c06e3ecff2959b4"} Nov 25 15:42:08 crc kubenswrapper[4800]: I1125 15:42:08.423379 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.191:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 15:42:08 crc kubenswrapper[4800]: I1125 15:42:08.423413 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.191:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 15:42:09 crc kubenswrapper[4800]: I1125 15:42:09.972049 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 15:42:10 crc kubenswrapper[4800]: I1125 15:42:10.118585 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6h4s" event={"ID":"e2ca4176-bd74-4d88-9577-d11a5bf36108","Type":"ContainerStarted","Data":"e0149c4f340d93e9981d03d5dd365a5754866c2d9c5017f9e3626f0ad7eaf81f"} Nov 25 15:42:12 crc kubenswrapper[4800]: I1125 15:42:12.142062 4800 generic.go:334] "Generic (PLEG): container finished" podID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerID="e0149c4f340d93e9981d03d5dd365a5754866c2d9c5017f9e3626f0ad7eaf81f" exitCode=0 Nov 25 15:42:12 crc kubenswrapper[4800]: I1125 15:42:12.142167 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6h4s" event={"ID":"e2ca4176-bd74-4d88-9577-d11a5bf36108","Type":"ContainerDied","Data":"e0149c4f340d93e9981d03d5dd365a5754866c2d9c5017f9e3626f0ad7eaf81f"} Nov 25 15:42:13 crc kubenswrapper[4800]: I1125 15:42:13.172648 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-v6h4s" event={"ID":"e2ca4176-bd74-4d88-9577-d11a5bf36108","Type":"ContainerStarted","Data":"7e05d9201aca6027cb0ab95ac56a1c413c31130054824cf43dc0e2c1ce30e8ea"} Nov 25 15:42:13 crc kubenswrapper[4800]: I1125 15:42:13.205091 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v6h4s" podStartSLOduration=2.734513478 podStartE2EDuration="7.205041355s" podCreationTimestamp="2025-11-25 15:42:06 +0000 UTC" firstStartedPulling="2025-11-25 15:42:08.09919852 +0000 UTC m=+1489.153607002" lastFinishedPulling="2025-11-25 15:42:12.569726367 +0000 UTC m=+1493.624134879" observedRunningTime="2025-11-25 15:42:13.204537081 +0000 UTC m=+1494.258945563" watchObservedRunningTime="2025-11-25 15:42:13.205041355 +0000 UTC m=+1494.259449867" Nov 25 15:42:14 crc kubenswrapper[4800]: I1125 15:42:14.628069 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 15:42:14 crc kubenswrapper[4800]: I1125 15:42:14.630249 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 15:42:14 crc kubenswrapper[4800]: I1125 15:42:14.636256 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 15:42:15 crc kubenswrapper[4800]: I1125 15:42:15.197551 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 15:42:16 crc kubenswrapper[4800]: I1125 15:42:16.712200 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:16 crc kubenswrapper[4800]: I1125 15:42:16.712597 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:17 crc kubenswrapper[4800]: I1125 15:42:17.409375 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 15:42:17 crc kubenswrapper[4800]: I1125 15:42:17.409794 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 15:42:17 crc kubenswrapper[4800]: I1125 15:42:17.411368 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 15:42:17 crc kubenswrapper[4800]: I1125 15:42:17.425045 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 15:42:17 crc kubenswrapper[4800]: I1125 15:42:17.765590 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-v6h4s" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="registry-server" probeResult="failure" output=< Nov 25 15:42:17 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 15:42:17 crc kubenswrapper[4800]: > Nov 25 15:42:18 crc kubenswrapper[4800]: I1125 15:42:18.224321 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 15:42:18 crc kubenswrapper[4800]: I1125 15:42:18.231829 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 15:42:26 crc kubenswrapper[4800]: I1125 15:42:26.763204 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:26 crc kubenswrapper[4800]: I1125 15:42:26.828111 4800 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 15:42:26 crc kubenswrapper[4800]: I1125 15:42:26.831342 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:27 crc kubenswrapper[4800]: I1125 15:42:27.011286 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v6h4s"] Nov 25 15:42:27 crc kubenswrapper[4800]: I1125 15:42:27.667067 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 15:42:28 crc kubenswrapper[4800]: I1125 15:42:28.323244 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v6h4s" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="registry-server" containerID="cri-o://7e05d9201aca6027cb0ab95ac56a1c413c31130054824cf43dc0e2c1ce30e8ea" gracePeriod=2 Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.338992 4800 generic.go:334] "Generic (PLEG): container finished" podID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerID="7e05d9201aca6027cb0ab95ac56a1c413c31130054824cf43dc0e2c1ce30e8ea" exitCode=0 Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.339100 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6h4s" event={"ID":"e2ca4176-bd74-4d88-9577-d11a5bf36108","Type":"ContainerDied","Data":"7e05d9201aca6027cb0ab95ac56a1c413c31130054824cf43dc0e2c1ce30e8ea"} Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.467446 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.610425 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-utilities\") pod \"e2ca4176-bd74-4d88-9577-d11a5bf36108\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.610775 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5lrq\" (UniqueName: \"kubernetes.io/projected/e2ca4176-bd74-4d88-9577-d11a5bf36108-kube-api-access-l5lrq\") pod \"e2ca4176-bd74-4d88-9577-d11a5bf36108\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.610835 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-catalog-content\") pod \"e2ca4176-bd74-4d88-9577-d11a5bf36108\" (UID: \"e2ca4176-bd74-4d88-9577-d11a5bf36108\") " Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.611343 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-utilities" (OuterVolumeSpecName: "utilities") pod "e2ca4176-bd74-4d88-9577-d11a5bf36108" (UID: "e2ca4176-bd74-4d88-9577-d11a5bf36108"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.636066 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2ca4176-bd74-4d88-9577-d11a5bf36108-kube-api-access-l5lrq" (OuterVolumeSpecName: "kube-api-access-l5lrq") pod "e2ca4176-bd74-4d88-9577-d11a5bf36108" (UID: "e2ca4176-bd74-4d88-9577-d11a5bf36108"). InnerVolumeSpecName "kube-api-access-l5lrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.712595 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5lrq\" (UniqueName: \"kubernetes.io/projected/e2ca4176-bd74-4d88-9577-d11a5bf36108-kube-api-access-l5lrq\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.712638 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.775552 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2ca4176-bd74-4d88-9577-d11a5bf36108" (UID: "e2ca4176-bd74-4d88-9577-d11a5bf36108"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:42:29 crc kubenswrapper[4800]: I1125 15:42:29.814477 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2ca4176-bd74-4d88-9577-d11a5bf36108-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:30 crc kubenswrapper[4800]: I1125 15:42:30.354338 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6h4s" event={"ID":"e2ca4176-bd74-4d88-9577-d11a5bf36108","Type":"ContainerDied","Data":"82f89f865541246435e348c4032d89119732a58ebba26d3d4c06e3ecff2959b4"} Nov 25 15:42:30 crc kubenswrapper[4800]: I1125 15:42:30.354415 4800 scope.go:117] "RemoveContainer" containerID="7e05d9201aca6027cb0ab95ac56a1c413c31130054824cf43dc0e2c1ce30e8ea" Nov 25 15:42:30 crc kubenswrapper[4800]: I1125 15:42:30.354505 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v6h4s" Nov 25 15:42:30 crc kubenswrapper[4800]: I1125 15:42:30.389077 4800 scope.go:117] "RemoveContainer" containerID="e0149c4f340d93e9981d03d5dd365a5754866c2d9c5017f9e3626f0ad7eaf81f" Nov 25 15:42:30 crc kubenswrapper[4800]: I1125 15:42:30.392382 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v6h4s"] Nov 25 15:42:30 crc kubenswrapper[4800]: I1125 15:42:30.435448 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v6h4s"] Nov 25 15:42:30 crc kubenswrapper[4800]: I1125 15:42:30.439750 4800 scope.go:117] "RemoveContainer" containerID="87ec5827a4d95165b78ac50a6956853f93af05165f999522016f4709922767e3" Nov 25 15:42:31 crc kubenswrapper[4800]: I1125 15:42:31.413933 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="rabbitmq" containerID="cri-o://7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2" gracePeriod=604796 Nov 25 15:42:31 crc kubenswrapper[4800]: I1125 15:42:31.799277 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" path="/var/lib/kubelet/pods/e2ca4176-bd74-4d88-9577-d11a5bf36108/volumes" Nov 25 15:42:32 crc kubenswrapper[4800]: I1125 15:42:32.509986 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="rabbitmq" containerID="cri-o://1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050" gracePeriod=604796 Nov 25 15:42:37 crc kubenswrapper[4800]: I1125 15:42:37.419144 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.97:5671: connect: connection refused" Nov 25 15:42:37 crc kubenswrapper[4800]: I1125 15:42:37.538303 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.252105 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.407306 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2jtc\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-kube-api-access-l2jtc\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.407393 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-config-data\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.407531 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-tls\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.407677 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-confd\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.407734 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/72be41d8-6678-467c-a4d5-c4340e488c1b-pod-info\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.407783 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72be41d8-6678-467c-a4d5-c4340e488c1b-erlang-cookie-secret\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.407957 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-plugins\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.408004 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.408105 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-erlang-cookie\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.408159 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-plugins-conf\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: 
\"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.408182 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-server-conf\") pod \"72be41d8-6678-467c-a4d5-c4340e488c1b\" (UID: \"72be41d8-6678-467c-a4d5-c4340e488c1b\") " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.409061 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.409939 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.410030 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.432640 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.434071 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/72be41d8-6678-467c-a4d5-c4340e488c1b-pod-info" (OuterVolumeSpecName: "pod-info") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.434181 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-kube-api-access-l2jtc" (OuterVolumeSpecName: "kube-api-access-l2jtc") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "kube-api-access-l2jtc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.435167 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "local-storage02-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.436229 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72be41d8-6678-467c-a4d5-c4340e488c1b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.447028 4800 generic.go:334] "Generic (PLEG): container finished" podID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerID="7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2" exitCode=0 Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.447086 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"72be41d8-6678-467c-a4d5-c4340e488c1b","Type":"ContainerDied","Data":"7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2"} Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.447116 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"72be41d8-6678-467c-a4d5-c4340e488c1b","Type":"ContainerDied","Data":"97b208d93931b80cc98ffaeece031a65c5ba3fa51eb0e8cce26411f4931bc9b8"} Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.447134 4800 scope.go:117] "RemoveContainer" containerID="7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.447323 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.448547 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-config-data" (OuterVolumeSpecName: "config-data") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.485615 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-server-conf" (OuterVolumeSpecName: "server-conf") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511529 4800 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/72be41d8-6678-467c-a4d5-c4340e488c1b-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511573 4800 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72be41d8-6678-467c-a4d5-c4340e488c1b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511583 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511613 4800 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511624 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511634 4800 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511641 4800 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511651 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2jtc\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-kube-api-access-l2jtc\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511660 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72be41d8-6678-467c-a4d5-c4340e488c1b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.511670 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.542796 4800 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.544552 4800 scope.go:117] "RemoveContainer" containerID="6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.562535 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "72be41d8-6678-467c-a4d5-c4340e488c1b" (UID: "72be41d8-6678-467c-a4d5-c4340e488c1b"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.589637 4800 scope.go:117] "RemoveContainer" containerID="7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2" Nov 25 15:42:38 crc kubenswrapper[4800]: E1125 15:42:38.590229 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2\": container with ID starting with 7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2 not found: ID does not exist" containerID="7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.590298 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2"} err="failed to get container status \"7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2\": rpc error: code = NotFound desc = could not find container \"7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2\": container with ID starting with 7441404e5b73d00ab46d9c0e00dd2d3a7e2d327ece91e12f761376e76cb296f2 not found: ID does not exist" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.590335 4800 scope.go:117] "RemoveContainer" containerID="6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca" Nov 25 15:42:38 crc kubenswrapper[4800]: E1125 15:42:38.590892 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca\": container with ID starting with 6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca not found: ID does not exist" containerID="6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.590960 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca"} err="failed to get container status \"6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca\": rpc error: code = NotFound desc = could not find container \"6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca\": container with ID starting with 6a6b68af0650fee007aae70ba973e29d9cce12f19e8243d8b1dff41faa58f8ca not found: ID does not exist" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.613685 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72be41d8-6678-467c-a4d5-c4340e488c1b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.613756 4800 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.787651 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.870484 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.940626 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 15:42:38 crc kubenswrapper[4800]: E1125 
15:42:38.941095 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="extract-content" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.941109 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="extract-content" Nov 25 15:42:38 crc kubenswrapper[4800]: E1125 15:42:38.941125 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="setup-container" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.941130 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="setup-container" Nov 25 15:42:38 crc kubenswrapper[4800]: E1125 15:42:38.941163 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="rabbitmq" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.941171 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="rabbitmq" Nov 25 15:42:38 crc kubenswrapper[4800]: E1125 15:42:38.941183 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="registry-server" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.941190 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="registry-server" Nov 25 15:42:38 crc kubenswrapper[4800]: E1125 15:42:38.941201 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="extract-utilities" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.941206 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="extract-utilities" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.941430 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" containerName="rabbitmq" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.941458 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2ca4176-bd74-4d88-9577-d11a5bf36108" containerName="registry-server" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.942538 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.967312 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.967607 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.967763 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.967930 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9xqzd" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.968044 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.968153 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.968249 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 15:42:38 crc kubenswrapper[4800]: I1125 15:42:38.971599 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038316 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038404 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038444 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038466 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038492 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gndl9\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-kube-api-access-gndl9\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038512 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038534 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-config-data\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038567 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-server-conf\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038595 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-pod-info\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038631 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.038674 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140098 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140175 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140211 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140244 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc 
kubenswrapper[4800]: I1125 15:42:39.140266 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140289 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gndl9\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-kube-api-access-gndl9\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140309 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140328 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-config-data\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140361 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-server-conf\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140383 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-pod-info\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140417 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140883 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.140981 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.141889 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.145591 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.146414 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-config-data\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.147699 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-server-conf\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.151767 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.151892 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.151977 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-pod-info\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.169355 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.169986 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gndl9\" (UniqueName: \"kubernetes.io/projected/46c6538a-1632-4c14-9ef6-3a3e4a15c3d4-kube-api-access-gndl9\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.180519 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4\") " pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.294558 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.458116 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.472576 4800 generic.go:334] "Generic (PLEG): container finished" podID="46b2c800-efef-4668-9a57-c66ff504e0db" containerID="1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050" exitCode=0 Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.472648 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"46b2c800-efef-4668-9a57-c66ff504e0db","Type":"ContainerDied","Data":"1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050"} Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.472692 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"46b2c800-efef-4668-9a57-c66ff504e0db","Type":"ContainerDied","Data":"04857e909d03657d3e3ea0d5c80afd1577dea50e82af70029ce71e48112c853a"} Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.472718 4800 scope.go:117] "RemoveContainer" containerID="1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.472956 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.535165 4800 scope.go:117] "RemoveContainer" containerID="71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561379 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-tls\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561518 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-server-conf\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561551 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561599 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46b2c800-efef-4668-9a57-c66ff504e0db-pod-info\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561632 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-confd\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561659 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" 
(UniqueName: \"kubernetes.io/secret/46b2c800-efef-4668-9a57-c66ff504e0db-erlang-cookie-secret\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561781 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-config-data\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561875 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-erlang-cookie\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561913 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zk8kh\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-kube-api-access-zk8kh\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.561965 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-plugins-conf\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.562006 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-plugins\") pod \"46b2c800-efef-4668-9a57-c66ff504e0db\" (UID: \"46b2c800-efef-4668-9a57-c66ff504e0db\") " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.562964 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.563736 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.564809 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.565132 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.565162 4800 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.565177 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.573598 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/46b2c800-efef-4668-9a57-c66ff504e0db-pod-info" (OuterVolumeSpecName: "pod-info") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.574270 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46b2c800-efef-4668-9a57-c66ff504e0db-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.575182 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-kube-api-access-zk8kh" (OuterVolumeSpecName: "kube-api-access-zk8kh") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "kube-api-access-zk8kh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.580511 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "persistence") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.583253 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.599698 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-config-data" (OuterVolumeSpecName: "config-data") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.611781 4800 scope.go:117] "RemoveContainer" containerID="1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050" Nov 25 15:42:39 crc kubenswrapper[4800]: E1125 15:42:39.612436 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050\": container with ID starting with 1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050 not found: ID does not exist" containerID="1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.612476 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050"} err="failed to get container status \"1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050\": rpc error: code = NotFound desc = could not find container \"1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050\": container with ID starting with 1ef32f72876089eec78b73e74167ad1a3e3639a9072b0c1806d07fd6d8a3e050 not found: ID does not exist" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.612507 4800 scope.go:117] "RemoveContainer" containerID="71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552" Nov 25 15:42:39 crc kubenswrapper[4800]: E1125 15:42:39.613157 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552\": container with ID starting with 71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552 not found: ID does not exist" containerID="71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.613183 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552"} err="failed to get container status \"71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552\": rpc error: code = NotFound desc = could not find container \"71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552\": container with ID starting with 71e2b7cea92e41a8123ca4d13df2f0b15c8131951b2ee7c6fe406d4b808e7552 not found: ID does not exist" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.635761 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-server-conf" (OuterVolumeSpecName: "server-conf") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.666507 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.666555 4800 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.666592 4800 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.666603 4800 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/46b2c800-efef-4668-9a57-c66ff504e0db-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.666613 4800 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/46b2c800-efef-4668-9a57-c66ff504e0db-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.666824 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46b2c800-efef-4668-9a57-c66ff504e0db-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.666860 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zk8kh\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-kube-api-access-zk8kh\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.694525 4800 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.715141 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "46b2c800-efef-4668-9a57-c66ff504e0db" (UID: "46b2c800-efef-4668-9a57-c66ff504e0db"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.768822 4800 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.768901 4800 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/46b2c800-efef-4668-9a57-c66ff504e0db-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.806836 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72be41d8-6678-467c-a4d5-c4340e488c1b" path="/var/lib/kubelet/pods/72be41d8-6678-467c-a4d5-c4340e488c1b/volumes" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.824470 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.833163 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.874823 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 15:42:39 crc kubenswrapper[4800]: E1125 15:42:39.875609 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="rabbitmq" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.875630 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="rabbitmq" Nov 25 15:42:39 crc kubenswrapper[4800]: E1125 15:42:39.875647 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="setup-container" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.875655 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="setup-container" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.875899 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" containerName="rabbitmq" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.877521 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.887306 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.887648 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.887836 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.888309 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.888510 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.888729 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.889261 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-bprtr" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.889552 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.912962 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972280 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f33262f7-29fd-4207-b465-558a4027c20a-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972694 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972724 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972764 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972787 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972811 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972858 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgx2v\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-kube-api-access-lgx2v\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972888 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f33262f7-29fd-4207-b465-558a4027c20a-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972913 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972931 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: I1125 15:42:39.972958 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:39 crc kubenswrapper[4800]: E1125 15:42:39.999836 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46b2c800_efef_4668_9a57_c66ff504e0db.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46b2c800_efef_4668_9a57_c66ff504e0db.slice/crio-04857e909d03657d3e3ea0d5c80afd1577dea50e82af70029ce71e48112c853a\": RecentStats: unable to find data in memory cache]" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074700 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f33262f7-29fd-4207-b465-558a4027c20a-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074756 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074790 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074826 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074866 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074898 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074926 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgx2v\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-kube-api-access-lgx2v\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074960 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f33262f7-29fd-4207-b465-558a4027c20a-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.074994 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.075018 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.075060 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") 
" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.075460 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.075600 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.075905 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.076211 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.076751 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.076832 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f33262f7-29fd-4207-b465-558a4027c20a-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.082253 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.082795 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f33262f7-29fd-4207-b465-558a4027c20a-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.083086 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.086549 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f33262f7-29fd-4207-b465-558a4027c20a-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.092636 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgx2v\" (UniqueName: \"kubernetes.io/projected/f33262f7-29fd-4207-b465-558a4027c20a-kube-api-access-lgx2v\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.107028 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f33262f7-29fd-4207-b465-558a4027c20a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.197609 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.516023 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4","Type":"ContainerStarted","Data":"b921d66e2d699fd7adcb246ce31283e55ec15160257538e13e9034d3166ed71e"} Nov 25 15:42:40 crc kubenswrapper[4800]: I1125 15:42:40.690876 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 15:42:41 crc kubenswrapper[4800]: I1125 15:42:41.531031 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f33262f7-29fd-4207-b465-558a4027c20a","Type":"ContainerStarted","Data":"e6087478ac1ca7c0da7e6ea5a8defd1c6707ed98ae7371c54b28178af1612e96"} Nov 25 15:42:41 crc kubenswrapper[4800]: I1125 15:42:41.800730 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46b2c800-efef-4668-9a57-c66ff504e0db" path="/var/lib/kubelet/pods/46b2c800-efef-4668-9a57-c66ff504e0db/volumes" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.343004 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64b6dd64c5-gfjzx"] Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.345986 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.348259 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.365768 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64b6dd64c5-gfjzx"] Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.467670 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-config\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.467760 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-openstack-edpm-ipam\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.467781 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-sb\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.468049 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-nb\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.468283 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-dns-svc\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.468724 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9qgl\" (UniqueName: \"kubernetes.io/projected/5ca20499-5988-4f38-a5d4-e8c0365eb947-kube-api-access-r9qgl\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.543322 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4","Type":"ContainerStarted","Data":"164af81e68065850b733bcf68dcee8ca9c33d92cb317010892ba7522b315e687"} Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.546802 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f33262f7-29fd-4207-b465-558a4027c20a","Type":"ContainerStarted","Data":"ee16492f1843b23b33611a9ee7912f8cc473f02051ce35d0f772b1772d267130"} Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.571045 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-r9qgl\" (UniqueName: \"kubernetes.io/projected/5ca20499-5988-4f38-a5d4-e8c0365eb947-kube-api-access-r9qgl\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.571107 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-config\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.571176 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-openstack-edpm-ipam\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.571199 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-sb\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.571235 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-nb\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.571265 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-dns-svc\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.572433 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-dns-svc\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.573033 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-openstack-edpm-ipam\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.573192 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-config\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.573192 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-sb\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.573337 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-nb\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.602062 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9qgl\" (UniqueName: \"kubernetes.io/projected/5ca20499-5988-4f38-a5d4-e8c0365eb947-kube-api-access-r9qgl\") pod \"dnsmasq-dns-64b6dd64c5-gfjzx\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:42 crc kubenswrapper[4800]: I1125 15:42:42.703705 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:43 crc kubenswrapper[4800]: I1125 15:42:43.212396 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64b6dd64c5-gfjzx"] Nov 25 15:42:43 crc kubenswrapper[4800]: I1125 15:42:43.559617 4800 generic.go:334] "Generic (PLEG): container finished" podID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerID="84039baee7fc4eef4c50016fb26f97f4423d33348281b9a16ec0cae69818b455" exitCode=0 Nov 25 15:42:43 crc kubenswrapper[4800]: I1125 15:42:43.559914 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" event={"ID":"5ca20499-5988-4f38-a5d4-e8c0365eb947","Type":"ContainerDied","Data":"84039baee7fc4eef4c50016fb26f97f4423d33348281b9a16ec0cae69818b455"} Nov 25 15:42:43 crc kubenswrapper[4800]: I1125 15:42:43.560473 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" event={"ID":"5ca20499-5988-4f38-a5d4-e8c0365eb947","Type":"ContainerStarted","Data":"b723135ee742f90ce10becd83a939f964546abe9bd2b7d62863d4d66dafec410"} Nov 25 15:42:44 crc kubenswrapper[4800]: I1125 15:42:44.574915 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" event={"ID":"5ca20499-5988-4f38-a5d4-e8c0365eb947","Type":"ContainerStarted","Data":"a669d0840249a2766227e958efc228d46e91e9abccc074917cf3fe3a6d212bc5"} Nov 25 15:42:44 crc kubenswrapper[4800]: I1125 15:42:44.575532 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:44 crc kubenswrapper[4800]: I1125 15:42:44.615412 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" podStartSLOduration=2.615386637 podStartE2EDuration="2.615386637s" podCreationTimestamp="2025-11-25 15:42:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:42:44.60346381 +0000 UTC m=+1525.657872302" watchObservedRunningTime="2025-11-25 15:42:44.615386637 +0000 UTC m=+1525.669795139" Nov 25 15:42:52 crc kubenswrapper[4800]: I1125 15:42:52.706280 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:42:52 crc kubenswrapper[4800]: I1125 15:42:52.807370 4800 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f95c456cf-gvqr6"] Nov 25 15:42:52 crc kubenswrapper[4800]: I1125 15:42:52.808300 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerName="dnsmasq-dns" containerID="cri-o://12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6" gracePeriod=10 Nov 25 15:42:52 crc kubenswrapper[4800]: I1125 15:42:52.951150 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.185:5353: connect: connection refused" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.033510 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c58867b6c-fng46"] Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.035512 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.072677 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c58867b6c-fng46"] Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.111721 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-nb\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.111774 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-sb\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.112297 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-config\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.112329 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2k8c\" (UniqueName: \"kubernetes.io/projected/c9fb8541-9c86-4587-a27d-01ebf680fcc1-kube-api-access-j2k8c\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.112349 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-openstack-edpm-ipam\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.112459 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-dns-svc\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.215458 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-nb\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.216071 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-sb\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.216115 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-config\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.216144 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2k8c\" (UniqueName: \"kubernetes.io/projected/c9fb8541-9c86-4587-a27d-01ebf680fcc1-kube-api-access-j2k8c\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.216202 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-openstack-edpm-ipam\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.216554 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-nb\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.216763 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-config\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.217315 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-sb\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.221668 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-openstack-edpm-ipam\") pod 
\"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.222968 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-dns-svc\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.223906 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-dns-svc\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.245029 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2k8c\" (UniqueName: \"kubernetes.io/projected/c9fb8541-9c86-4587-a27d-01ebf680fcc1-kube-api-access-j2k8c\") pod \"dnsmasq-dns-c58867b6c-fng46\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.352208 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.379456 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.430329 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-dns-svc\") pod \"bcd422c7-527d-4fe3-802f-72656b4bf034\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.430412 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-config\") pod \"bcd422c7-527d-4fe3-802f-72656b4bf034\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.430455 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-nb\") pod \"bcd422c7-527d-4fe3-802f-72656b4bf034\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.430530 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nn48b\" (UniqueName: \"kubernetes.io/projected/bcd422c7-527d-4fe3-802f-72656b4bf034-kube-api-access-nn48b\") pod \"bcd422c7-527d-4fe3-802f-72656b4bf034\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.430770 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-sb\") pod \"bcd422c7-527d-4fe3-802f-72656b4bf034\" (UID: \"bcd422c7-527d-4fe3-802f-72656b4bf034\") " Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.437481 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/bcd422c7-527d-4fe3-802f-72656b4bf034-kube-api-access-nn48b" (OuterVolumeSpecName: "kube-api-access-nn48b") pod "bcd422c7-527d-4fe3-802f-72656b4bf034" (UID: "bcd422c7-527d-4fe3-802f-72656b4bf034"). InnerVolumeSpecName "kube-api-access-nn48b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.520459 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bcd422c7-527d-4fe3-802f-72656b4bf034" (UID: "bcd422c7-527d-4fe3-802f-72656b4bf034"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.533813 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.534134 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nn48b\" (UniqueName: \"kubernetes.io/projected/bcd422c7-527d-4fe3-802f-72656b4bf034-kube-api-access-nn48b\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.570013 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bcd422c7-527d-4fe3-802f-72656b4bf034" (UID: "bcd422c7-527d-4fe3-802f-72656b4bf034"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.570756 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-config" (OuterVolumeSpecName: "config") pod "bcd422c7-527d-4fe3-802f-72656b4bf034" (UID: "bcd422c7-527d-4fe3-802f-72656b4bf034"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.580080 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bcd422c7-527d-4fe3-802f-72656b4bf034" (UID: "bcd422c7-527d-4fe3-802f-72656b4bf034"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.636798 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.637475 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.637493 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcd422c7-527d-4fe3-802f-72656b4bf034-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.670170 4800 generic.go:334] "Generic (PLEG): container finished" podID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerID="12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6" exitCode=0 Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.670218 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" event={"ID":"bcd422c7-527d-4fe3-802f-72656b4bf034","Type":"ContainerDied","Data":"12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6"} Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.670253 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" event={"ID":"bcd422c7-527d-4fe3-802f-72656b4bf034","Type":"ContainerDied","Data":"69970a0e994d6cbb29566556688f5ec2bd0f7b5407d1bce3ea8c3b690abca725"} Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.670272 4800 scope.go:117] "RemoveContainer" containerID="12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.670419 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f95c456cf-gvqr6" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.720277 4800 scope.go:117] "RemoveContainer" containerID="63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.730334 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f95c456cf-gvqr6"] Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.740237 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f95c456cf-gvqr6"] Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.760303 4800 scope.go:117] "RemoveContainer" containerID="12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6" Nov 25 15:42:53 crc kubenswrapper[4800]: E1125 15:42:53.760681 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6\": container with ID starting with 12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6 not found: ID does not exist" containerID="12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.760713 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6"} err="failed to get container status \"12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6\": rpc error: code = NotFound desc = could not find container \"12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6\": container with ID starting with 12a1681d5d7325d8431b4ccee726784bf373bae7e86193fcb44dc89cc66c01e6 not found: ID does not exist" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.760737 4800 scope.go:117] "RemoveContainer" containerID="63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957" Nov 25 15:42:53 crc kubenswrapper[4800]: E1125 15:42:53.760999 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957\": container with ID starting with 63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957 not found: ID does not exist" containerID="63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.761022 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957"} err="failed to get container status \"63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957\": rpc error: code = NotFound desc = could not find container \"63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957\": container with ID starting with 63bca906a893f939a5fc8e681bc28058ed4f1a33d27cbb42f2ed68a78c546957 not found: ID does not exist" Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.800507 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" path="/var/lib/kubelet/pods/bcd422c7-527d-4fe3-802f-72656b4bf034/volumes" Nov 25 15:42:53 crc kubenswrapper[4800]: W1125 15:42:53.982803 4800 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9fb8541_9c86_4587_a27d_01ebf680fcc1.slice/crio-b423841e63f0ee99f40dee2260eee3396fb0da41141aeb935e0d39b42721bbc8 WatchSource:0}: Error finding container b423841e63f0ee99f40dee2260eee3396fb0da41141aeb935e0d39b42721bbc8: Status 404 returned error can't find the container with id b423841e63f0ee99f40dee2260eee3396fb0da41141aeb935e0d39b42721bbc8 Nov 25 15:42:53 crc kubenswrapper[4800]: I1125 15:42:53.984810 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c58867b6c-fng46"] Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.524116 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4gzq4"] Nov 25 15:42:54 crc kubenswrapper[4800]: E1125 15:42:54.525294 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerName="init" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.525505 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerName="init" Nov 25 15:42:54 crc kubenswrapper[4800]: E1125 15:42:54.525638 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerName="dnsmasq-dns" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.525757 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerName="dnsmasq-dns" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.526245 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcd422c7-527d-4fe3-802f-72656b4bf034" containerName="dnsmasq-dns" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.528765 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.546436 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4gzq4"] Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.670035 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-catalog-content\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.670096 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl6zq\" (UniqueName: \"kubernetes.io/projected/cd603404-4c6c-4604-b72f-15da0c5faa91-kube-api-access-wl6zq\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.670286 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-utilities\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.684325 4800 generic.go:334] "Generic (PLEG): container finished" podID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerID="85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da" exitCode=0 Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.684477 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c58867b6c-fng46" event={"ID":"c9fb8541-9c86-4587-a27d-01ebf680fcc1","Type":"ContainerDied","Data":"85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da"} Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.684527 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c58867b6c-fng46" event={"ID":"c9fb8541-9c86-4587-a27d-01ebf680fcc1","Type":"ContainerStarted","Data":"b423841e63f0ee99f40dee2260eee3396fb0da41141aeb935e0d39b42721bbc8"} Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.773031 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-catalog-content\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.772294 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-catalog-content\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.773149 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl6zq\" (UniqueName: \"kubernetes.io/projected/cd603404-4c6c-4604-b72f-15da0c5faa91-kube-api-access-wl6zq\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " 
pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.773999 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-utilities\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.774473 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-utilities\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.797244 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl6zq\" (UniqueName: \"kubernetes.io/projected/cd603404-4c6c-4604-b72f-15da0c5faa91-kube-api-access-wl6zq\") pod \"community-operators-4gzq4\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:54 crc kubenswrapper[4800]: I1125 15:42:54.853830 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:42:55 crc kubenswrapper[4800]: I1125 15:42:55.433751 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4gzq4"] Nov 25 15:42:55 crc kubenswrapper[4800]: I1125 15:42:55.700836 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c58867b6c-fng46" event={"ID":"c9fb8541-9c86-4587-a27d-01ebf680fcc1","Type":"ContainerStarted","Data":"e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e"} Nov 25 15:42:55 crc kubenswrapper[4800]: I1125 15:42:55.701044 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:42:55 crc kubenswrapper[4800]: I1125 15:42:55.704530 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4gzq4" event={"ID":"cd603404-4c6c-4604-b72f-15da0c5faa91","Type":"ContainerStarted","Data":"b3a9dc19b0fd564952f03e64f7bbc82055ba5a3abebfe4c4651238b022a4ed14"} Nov 25 15:42:55 crc kubenswrapper[4800]: I1125 15:42:55.736751 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-c58867b6c-fng46" podStartSLOduration=3.736718721 podStartE2EDuration="3.736718721s" podCreationTimestamp="2025-11-25 15:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:42:55.736400723 +0000 UTC m=+1536.790809205" watchObservedRunningTime="2025-11-25 15:42:55.736718721 +0000 UTC m=+1536.791127223" Nov 25 15:42:56 crc kubenswrapper[4800]: I1125 15:42:56.718760 4800 generic.go:334] "Generic (PLEG): container finished" podID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerID="e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223" exitCode=0 Nov 25 15:42:56 crc kubenswrapper[4800]: I1125 15:42:56.720790 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4gzq4" event={"ID":"cd603404-4c6c-4604-b72f-15da0c5faa91","Type":"ContainerDied","Data":"e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223"} Nov 25 
15:42:57 crc kubenswrapper[4800]: I1125 15:42:57.738041 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4gzq4" event={"ID":"cd603404-4c6c-4604-b72f-15da0c5faa91","Type":"ContainerStarted","Data":"771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819"} Nov 25 15:42:59 crc kubenswrapper[4800]: I1125 15:42:59.764656 4800 generic.go:334] "Generic (PLEG): container finished" podID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerID="771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819" exitCode=0 Nov 25 15:42:59 crc kubenswrapper[4800]: I1125 15:42:59.764769 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4gzq4" event={"ID":"cd603404-4c6c-4604-b72f-15da0c5faa91","Type":"ContainerDied","Data":"771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819"} Nov 25 15:43:00 crc kubenswrapper[4800]: I1125 15:43:00.784106 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4gzq4" event={"ID":"cd603404-4c6c-4604-b72f-15da0c5faa91","Type":"ContainerStarted","Data":"22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3"} Nov 25 15:43:00 crc kubenswrapper[4800]: I1125 15:43:00.817477 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4gzq4" podStartSLOduration=3.385136526 podStartE2EDuration="6.817445549s" podCreationTimestamp="2025-11-25 15:42:54 +0000 UTC" firstStartedPulling="2025-11-25 15:42:56.721660878 +0000 UTC m=+1537.776069360" lastFinishedPulling="2025-11-25 15:43:00.153969891 +0000 UTC m=+1541.208378383" observedRunningTime="2025-11-25 15:43:00.807182846 +0000 UTC m=+1541.861591338" watchObservedRunningTime="2025-11-25 15:43:00.817445549 +0000 UTC m=+1541.871854031" Nov 25 15:43:03 crc kubenswrapper[4800]: I1125 15:43:03.382048 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 15:43:03 crc kubenswrapper[4800]: I1125 15:43:03.450353 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64b6dd64c5-gfjzx"] Nov 25 15:43:03 crc kubenswrapper[4800]: I1125 15:43:03.450734 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" podUID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerName="dnsmasq-dns" containerID="cri-o://a669d0840249a2766227e958efc228d46e91e9abccc074917cf3fe3a6d212bc5" gracePeriod=10 Nov 25 15:43:03 crc kubenswrapper[4800]: I1125 15:43:03.881436 4800 generic.go:334] "Generic (PLEG): container finished" podID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerID="a669d0840249a2766227e958efc228d46e91e9abccc074917cf3fe3a6d212bc5" exitCode=0 Nov 25 15:43:03 crc kubenswrapper[4800]: I1125 15:43:03.881987 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" event={"ID":"5ca20499-5988-4f38-a5d4-e8c0365eb947","Type":"ContainerDied","Data":"a669d0840249a2766227e958efc228d46e91e9abccc074917cf3fe3a6d212bc5"} Nov 25 15:43:03 crc kubenswrapper[4800]: I1125 15:43:03.952051 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.125474 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-nb\") pod \"5ca20499-5988-4f38-a5d4-e8c0365eb947\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.126077 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-dns-svc\") pod \"5ca20499-5988-4f38-a5d4-e8c0365eb947\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.126174 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9qgl\" (UniqueName: \"kubernetes.io/projected/5ca20499-5988-4f38-a5d4-e8c0365eb947-kube-api-access-r9qgl\") pod \"5ca20499-5988-4f38-a5d4-e8c0365eb947\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.126236 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-config\") pod \"5ca20499-5988-4f38-a5d4-e8c0365eb947\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.126362 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-sb\") pod \"5ca20499-5988-4f38-a5d4-e8c0365eb947\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.126425 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-openstack-edpm-ipam\") pod \"5ca20499-5988-4f38-a5d4-e8c0365eb947\" (UID: \"5ca20499-5988-4f38-a5d4-e8c0365eb947\") " Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.138387 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ca20499-5988-4f38-a5d4-e8c0365eb947-kube-api-access-r9qgl" (OuterVolumeSpecName: "kube-api-access-r9qgl") pod "5ca20499-5988-4f38-a5d4-e8c0365eb947" (UID: "5ca20499-5988-4f38-a5d4-e8c0365eb947"). InnerVolumeSpecName "kube-api-access-r9qgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.196730 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5ca20499-5988-4f38-a5d4-e8c0365eb947" (UID: "5ca20499-5988-4f38-a5d4-e8c0365eb947"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.220665 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5ca20499-5988-4f38-a5d4-e8c0365eb947" (UID: "5ca20499-5988-4f38-a5d4-e8c0365eb947"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.230683 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.230729 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9qgl\" (UniqueName: \"kubernetes.io/projected/5ca20499-5988-4f38-a5d4-e8c0365eb947-kube-api-access-r9qgl\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.230743 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.231487 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "5ca20499-5988-4f38-a5d4-e8c0365eb947" (UID: "5ca20499-5988-4f38-a5d4-e8c0365eb947"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.234322 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-config" (OuterVolumeSpecName: "config") pod "5ca20499-5988-4f38-a5d4-e8c0365eb947" (UID: "5ca20499-5988-4f38-a5d4-e8c0365eb947"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.251581 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5ca20499-5988-4f38-a5d4-e8c0365eb947" (UID: "5ca20499-5988-4f38-a5d4-e8c0365eb947"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.333067 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-config\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.333107 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.333122 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ca20499-5988-4f38-a5d4-e8c0365eb947-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.855180 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.855254 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.894772 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" event={"ID":"5ca20499-5988-4f38-a5d4-e8c0365eb947","Type":"ContainerDied","Data":"b723135ee742f90ce10becd83a939f964546abe9bd2b7d62863d4d66dafec410"} Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.894845 4800 scope.go:117] "RemoveContainer" containerID="a669d0840249a2766227e958efc228d46e91e9abccc074917cf3fe3a6d212bc5" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.895026 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64b6dd64c5-gfjzx" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.915344 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.931752 4800 scope.go:117] "RemoveContainer" containerID="84039baee7fc4eef4c50016fb26f97f4423d33348281b9a16ec0cae69818b455" Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.974116 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64b6dd64c5-gfjzx"] Nov 25 15:43:04 crc kubenswrapper[4800]: I1125 15:43:04.982359 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64b6dd64c5-gfjzx"] Nov 25 15:43:05 crc kubenswrapper[4800]: I1125 15:43:05.796876 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ca20499-5988-4f38-a5d4-e8c0365eb947" path="/var/lib/kubelet/pods/5ca20499-5988-4f38-a5d4-e8c0365eb947/volumes" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.499612 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc"] Nov 25 15:43:13 crc kubenswrapper[4800]: E1125 15:43:13.501530 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerName="dnsmasq-dns" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.501552 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerName="dnsmasq-dns" Nov 25 15:43:13 crc kubenswrapper[4800]: E1125 15:43:13.501578 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerName="init" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.501587 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerName="init" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.501785 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ca20499-5988-4f38-a5d4-e8c0365eb947" containerName="dnsmasq-dns" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.503231 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.506362 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.506629 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.509392 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.510309 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.517260 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc"] Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.672788 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.672952 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6dd7\" (UniqueName: \"kubernetes.io/projected/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-kube-api-access-f6dd7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.673147 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.673246 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.774522 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.774618 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-repo-setup-combined-ca-bundle\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.774650 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6dd7\" (UniqueName: \"kubernetes.io/projected/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-kube-api-access-f6dd7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.774722 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.783663 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.783835 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.784675 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.798602 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6dd7\" (UniqueName: \"kubernetes.io/projected/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-kube-api-access-f6dd7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:13 crc kubenswrapper[4800]: I1125 15:43:13.830342 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:14 crc kubenswrapper[4800]: I1125 15:43:14.425258 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc"] Nov 25 15:43:14 crc kubenswrapper[4800]: W1125 15:43:14.430202 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode48dfcf5_6a27_44b9_aca0_56a9411ae73e.slice/crio-82589ea618b59fbea1e882032d411077b08522e770ae1680f32dedf8c276cb1d WatchSource:0}: Error finding container 82589ea618b59fbea1e882032d411077b08522e770ae1680f32dedf8c276cb1d: Status 404 returned error can't find the container with id 82589ea618b59fbea1e882032d411077b08522e770ae1680f32dedf8c276cb1d Nov 25 15:43:14 crc kubenswrapper[4800]: I1125 15:43:14.915658 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:43:14 crc kubenswrapper[4800]: I1125 15:43:14.988298 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4gzq4"] Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.024987 4800 generic.go:334] "Generic (PLEG): container finished" podID="46c6538a-1632-4c14-9ef6-3a3e4a15c3d4" containerID="164af81e68065850b733bcf68dcee8ca9c33d92cb317010892ba7522b315e687" exitCode=0 Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.025078 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4","Type":"ContainerDied","Data":"164af81e68065850b733bcf68dcee8ca9c33d92cb317010892ba7522b315e687"} Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.029399 4800 generic.go:334] "Generic (PLEG): container finished" podID="f33262f7-29fd-4207-b465-558a4027c20a" containerID="ee16492f1843b23b33611a9ee7912f8cc473f02051ce35d0f772b1772d267130" exitCode=0 Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.029431 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f33262f7-29fd-4207-b465-558a4027c20a","Type":"ContainerDied","Data":"ee16492f1843b23b33611a9ee7912f8cc473f02051ce35d0f772b1772d267130"} Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.035072 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4gzq4" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="registry-server" containerID="cri-o://22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3" gracePeriod=2 Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.035208 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" event={"ID":"e48dfcf5-6a27-44b9-aca0-56a9411ae73e","Type":"ContainerStarted","Data":"82589ea618b59fbea1e882032d411077b08522e770ae1680f32dedf8c276cb1d"} Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.615170 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.731902 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl6zq\" (UniqueName: \"kubernetes.io/projected/cd603404-4c6c-4604-b72f-15da0c5faa91-kube-api-access-wl6zq\") pod \"cd603404-4c6c-4604-b72f-15da0c5faa91\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.732084 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-utilities\") pod \"cd603404-4c6c-4604-b72f-15da0c5faa91\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.732127 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-catalog-content\") pod \"cd603404-4c6c-4604-b72f-15da0c5faa91\" (UID: \"cd603404-4c6c-4604-b72f-15da0c5faa91\") " Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.737712 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-utilities" (OuterVolumeSpecName: "utilities") pod "cd603404-4c6c-4604-b72f-15da0c5faa91" (UID: "cd603404-4c6c-4604-b72f-15da0c5faa91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.743034 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd603404-4c6c-4604-b72f-15da0c5faa91-kube-api-access-wl6zq" (OuterVolumeSpecName: "kube-api-access-wl6zq") pod "cd603404-4c6c-4604-b72f-15da0c5faa91" (UID: "cd603404-4c6c-4604-b72f-15da0c5faa91"). InnerVolumeSpecName "kube-api-access-wl6zq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.793658 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd603404-4c6c-4604-b72f-15da0c5faa91" (UID: "cd603404-4c6c-4604-b72f-15da0c5faa91"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.835958 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl6zq\" (UniqueName: \"kubernetes.io/projected/cd603404-4c6c-4604-b72f-15da0c5faa91-kube-api-access-wl6zq\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.836088 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:15 crc kubenswrapper[4800]: I1125 15:43:15.836105 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd603404-4c6c-4604-b72f-15da0c5faa91-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.050008 4800 generic.go:334] "Generic (PLEG): container finished" podID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerID="22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3" exitCode=0 Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.050197 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4gzq4" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.050691 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4gzq4" event={"ID":"cd603404-4c6c-4604-b72f-15da0c5faa91","Type":"ContainerDied","Data":"22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3"} Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.050777 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4gzq4" event={"ID":"cd603404-4c6c-4604-b72f-15da0c5faa91","Type":"ContainerDied","Data":"b3a9dc19b0fd564952f03e64f7bbc82055ba5a3abebfe4c4651238b022a4ed14"} Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.050807 4800 scope.go:117] "RemoveContainer" containerID="22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.059403 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"46c6538a-1632-4c14-9ef6-3a3e4a15c3d4","Type":"ContainerStarted","Data":"0474921aac5e646d8e8f900f8c5a6e860cb0769abdc733d1f89961cea33d5085"} Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.059864 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.064441 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f33262f7-29fd-4207-b465-558a4027c20a","Type":"ContainerStarted","Data":"dec73473a8c231b0962a00f830f2e04e54a9e5266a6fd8e1a7040db7ea7bde69"} Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.064781 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.098883 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.09883182 podStartE2EDuration="38.09883182s" podCreationTimestamp="2025-11-25 15:42:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:43:16.088383179 +0000 UTC 
m=+1557.142791661" watchObservedRunningTime="2025-11-25 15:43:16.09883182 +0000 UTC m=+1557.153240302" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.118720 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.11869919 podStartE2EDuration="37.11869919s" podCreationTimestamp="2025-11-25 15:42:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:43:16.116360376 +0000 UTC m=+1557.170768868" watchObservedRunningTime="2025-11-25 15:43:16.11869919 +0000 UTC m=+1557.173107672" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.150346 4800 scope.go:117] "RemoveContainer" containerID="771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.160568 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4gzq4"] Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.173351 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4gzq4"] Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.201000 4800 scope.go:117] "RemoveContainer" containerID="e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.228812 4800 scope.go:117] "RemoveContainer" containerID="22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3" Nov 25 15:43:16 crc kubenswrapper[4800]: E1125 15:43:16.229365 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3\": container with ID starting with 22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3 not found: ID does not exist" containerID="22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.229429 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3"} err="failed to get container status \"22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3\": rpc error: code = NotFound desc = could not find container \"22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3\": container with ID starting with 22e1ca4cd7a6fcacb728e3e2e9b5001f4761c366c47983b9ea23f9a7d1d42ac3 not found: ID does not exist" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.229466 4800 scope.go:117] "RemoveContainer" containerID="771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819" Nov 25 15:43:16 crc kubenswrapper[4800]: E1125 15:43:16.229871 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819\": container with ID starting with 771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819 not found: ID does not exist" containerID="771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.229909 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819"} err="failed to get container status 
\"771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819\": rpc error: code = NotFound desc = could not find container \"771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819\": container with ID starting with 771f7f56be2d456505b96663328d4887db397c169a6d795ccd9e9f7cd2548819 not found: ID does not exist" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.229975 4800 scope.go:117] "RemoveContainer" containerID="e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223" Nov 25 15:43:16 crc kubenswrapper[4800]: E1125 15:43:16.230270 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223\": container with ID starting with e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223 not found: ID does not exist" containerID="e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223" Nov 25 15:43:16 crc kubenswrapper[4800]: I1125 15:43:16.230298 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223"} err="failed to get container status \"e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223\": rpc error: code = NotFound desc = could not find container \"e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223\": container with ID starting with e86dd05541139ace7aacaf2508ed67e7e12761c029d583896f185e2bc22ff223 not found: ID does not exist" Nov 25 15:43:17 crc kubenswrapper[4800]: I1125 15:43:17.802266 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" path="/var/lib/kubelet/pods/cd603404-4c6c-4604-b72f-15da0c5faa91/volumes" Nov 25 15:43:22 crc kubenswrapper[4800]: I1125 15:43:22.676336 4800 scope.go:117] "RemoveContainer" containerID="e5417488c041b5d6fc6bf022eb82f8d1134145d6e160f9a7c16274c8451ef508" Nov 25 15:43:25 crc kubenswrapper[4800]: I1125 15:43:25.074922 4800 scope.go:117] "RemoveContainer" containerID="27e0fdc65dcd334eaeb0738129198eed8d9974e1082689dff75e407ed94c90c1" Nov 25 15:43:25 crc kubenswrapper[4800]: I1125 15:43:25.201675 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:43:26 crc kubenswrapper[4800]: I1125 15:43:26.196081 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" event={"ID":"e48dfcf5-6a27-44b9-aca0-56a9411ae73e","Type":"ContainerStarted","Data":"779d68bb45d152ea615fd5bd2efe6edbb2732b2fe491bf2a81ce4ecea6dd26e3"} Nov 25 15:43:26 crc kubenswrapper[4800]: I1125 15:43:26.227159 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" podStartSLOduration=2.464133949 podStartE2EDuration="13.227137267s" podCreationTimestamp="2025-11-25 15:43:13 +0000 UTC" firstStartedPulling="2025-11-25 15:43:14.434571109 +0000 UTC m=+1555.488979631" lastFinishedPulling="2025-11-25 15:43:25.197574467 +0000 UTC m=+1566.251982949" observedRunningTime="2025-11-25 15:43:26.217562436 +0000 UTC m=+1567.271970968" watchObservedRunningTime="2025-11-25 15:43:26.227137267 +0000 UTC m=+1567.281545749" Nov 25 15:43:29 crc kubenswrapper[4800]: I1125 15:43:29.299119 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 15:43:30 crc kubenswrapper[4800]: I1125 
15:43:30.202201 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 15:43:37 crc kubenswrapper[4800]: I1125 15:43:37.323706 4800 generic.go:334] "Generic (PLEG): container finished" podID="e48dfcf5-6a27-44b9-aca0-56a9411ae73e" containerID="779d68bb45d152ea615fd5bd2efe6edbb2732b2fe491bf2a81ce4ecea6dd26e3" exitCode=0 Nov 25 15:43:37 crc kubenswrapper[4800]: I1125 15:43:37.323808 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" event={"ID":"e48dfcf5-6a27-44b9-aca0-56a9411ae73e","Type":"ContainerDied","Data":"779d68bb45d152ea615fd5bd2efe6edbb2732b2fe491bf2a81ce4ecea6dd26e3"} Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.816963 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.917457 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-repo-setup-combined-ca-bundle\") pod \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.917605 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-ssh-key\") pod \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.917681 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-inventory\") pod \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.917733 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6dd7\" (UniqueName: \"kubernetes.io/projected/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-kube-api-access-f6dd7\") pod \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\" (UID: \"e48dfcf5-6a27-44b9-aca0-56a9411ae73e\") " Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.924954 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "e48dfcf5-6a27-44b9-aca0-56a9411ae73e" (UID: "e48dfcf5-6a27-44b9-aca0-56a9411ae73e"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.927960 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-kube-api-access-f6dd7" (OuterVolumeSpecName: "kube-api-access-f6dd7") pod "e48dfcf5-6a27-44b9-aca0-56a9411ae73e" (UID: "e48dfcf5-6a27-44b9-aca0-56a9411ae73e"). InnerVolumeSpecName "kube-api-access-f6dd7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.945376 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-inventory" (OuterVolumeSpecName: "inventory") pod "e48dfcf5-6a27-44b9-aca0-56a9411ae73e" (UID: "e48dfcf5-6a27-44b9-aca0-56a9411ae73e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:43:38 crc kubenswrapper[4800]: I1125 15:43:38.948497 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e48dfcf5-6a27-44b9-aca0-56a9411ae73e" (UID: "e48dfcf5-6a27-44b9-aca0-56a9411ae73e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.022384 4800 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.022461 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.022477 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.022499 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6dd7\" (UniqueName: \"kubernetes.io/projected/e48dfcf5-6a27-44b9-aca0-56a9411ae73e-kube-api-access-f6dd7\") on node \"crc\" DevicePath \"\"" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.349204 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" event={"ID":"e48dfcf5-6a27-44b9-aca0-56a9411ae73e","Type":"ContainerDied","Data":"82589ea618b59fbea1e882032d411077b08522e770ae1680f32dedf8c276cb1d"} Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.349297 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82589ea618b59fbea1e882032d411077b08522e770ae1680f32dedf8c276cb1d" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.349349 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.430990 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np"] Nov 25 15:43:39 crc kubenswrapper[4800]: E1125 15:43:39.432114 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="extract-content" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.432139 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="extract-content" Nov 25 15:43:39 crc kubenswrapper[4800]: E1125 15:43:39.432170 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="extract-utilities" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.432182 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="extract-utilities" Nov 25 15:43:39 crc kubenswrapper[4800]: E1125 15:43:39.432197 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="registry-server" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.432204 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="registry-server" Nov 25 15:43:39 crc kubenswrapper[4800]: E1125 15:43:39.432217 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e48dfcf5-6a27-44b9-aca0-56a9411ae73e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.432224 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e48dfcf5-6a27-44b9-aca0-56a9411ae73e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.432473 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd603404-4c6c-4604-b72f-15da0c5faa91" containerName="registry-server" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.432504 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="e48dfcf5-6a27-44b9-aca0-56a9411ae73e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.433449 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.461154 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.461434 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.461674 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.461805 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.480939 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np"] Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.534267 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.534332 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.534409 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r7z9\" (UniqueName: \"kubernetes.io/projected/32fce522-1642-4591-8201-17f0fdc8b096-kube-api-access-6r7z9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.534636 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.636108 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.636158 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-ssh-key\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.636196 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r7z9\" (UniqueName: \"kubernetes.io/projected/32fce522-1642-4591-8201-17f0fdc8b096-kube-api-access-6r7z9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.636299 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.654161 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.654192 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.654259 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.660677 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r7z9\" (UniqueName: \"kubernetes.io/projected/32fce522-1642-4591-8201-17f0fdc8b096-kube-api-access-6r7z9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:39 crc kubenswrapper[4800]: I1125 15:43:39.774975 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:43:40 crc kubenswrapper[4800]: I1125 15:43:40.312051 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np"] Nov 25 15:43:40 crc kubenswrapper[4800]: I1125 15:43:40.365267 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" event={"ID":"32fce522-1642-4591-8201-17f0fdc8b096","Type":"ContainerStarted","Data":"5179508b3c044b61ffb17b638f7b31ef2fa4f0b0f4c2094011f549a0b34568ed"} Nov 25 15:43:41 crc kubenswrapper[4800]: I1125 15:43:41.377119 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" event={"ID":"32fce522-1642-4591-8201-17f0fdc8b096","Type":"ContainerStarted","Data":"e7b2915863a018baf9090c0f34f8cdd272994f7fd4e107b64a973a1ff14e72fb"} Nov 25 15:43:41 crc kubenswrapper[4800]: I1125 15:43:41.410286 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" podStartSLOduration=2.008042704 podStartE2EDuration="2.410230491s" podCreationTimestamp="2025-11-25 15:43:39 +0000 UTC" firstStartedPulling="2025-11-25 15:43:40.3261202 +0000 UTC m=+1581.380528682" lastFinishedPulling="2025-11-25 15:43:40.728307977 +0000 UTC m=+1581.782716469" observedRunningTime="2025-11-25 15:43:41.401918908 +0000 UTC m=+1582.456327400" watchObservedRunningTime="2025-11-25 15:43:41.410230491 +0000 UTC m=+1582.464638993" Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.252396 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9xg64"] Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.255151 4800 util.go:30] "No sandbox for pod can be found. 
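The mount sequence above is the kubelet's usual three-phase volume flow: reconciler_common.go logs operationExecutor.VerifyControllerAttachedVolume and then operationExecutor.MountVolume for each volume, operation_generator.go confirms MountVolume.SetUp succeeded, and only then does the pod sandbox start. A minimal sketch of pulling that timeline for one pod UID out of a log like this one (assuming Python 3 with only the standard library; the regex and helper name are illustrative, not anything kubelet ships):

import re
import sys

# Illustrative pattern for the klog lines above: capture the kubelet
# timestamp, the volume-operation phase, and the quoted volume name.
PHASE_RE = re.compile(
    r'I(\d{4} \d{2}:\d{2}:\d{2}\.\d+) \d+ \S+\] '
    r'"(operationExecutor\.VerifyControllerAttachedVolume started|'
    r'operationExecutor\.MountVolume started|'
    r'MountVolume\.SetUp succeeded) for volume \\?"([^\\"]+)\\?"'
)

def volume_timeline(log_path, pod_uid):
    """Yield (kubelet_time, phase, volume) for lines mentioning pod_uid."""
    with open(log_path, encoding="utf-8", errors="replace") as f:
        for line in f:
            if pod_uid not in line:
                continue
            m = PHASE_RE.search(line)
            if m:
                yield m.group(1), m.group(2), m.group(3)

if __name__ == "__main__":
    for ts, phase, vol in volume_timeline(sys.argv[1], sys.argv[2]):
        print(ts, phase, vol)

Run against this file with pod UID 32fce522-1642-4591-8201-17f0fdc8b096, this would list the four bootstrap volumes passing through all three phases between 15:43:39.534 and 15:43:39.660.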
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.255151 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.264209 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xg64"]
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.297166 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-catalog-content\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.297283 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-utilities\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.297312 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp7xl\" (UniqueName: \"kubernetes.io/projected/cd159f31-d27c-4051-87f5-735e17f90d50-kube-api-access-hp7xl\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.399157 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-utilities\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.399221 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp7xl\" (UniqueName: \"kubernetes.io/projected/cd159f31-d27c-4051-87f5-735e17f90d50-kube-api-access-hp7xl\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.399363 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-catalog-content\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.399823 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-utilities\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.399868 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-catalog-content\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.431156 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp7xl\" (UniqueName: \"kubernetes.io/projected/cd159f31-d27c-4051-87f5-735e17f90d50-kube-api-access-hp7xl\") pod \"certified-operators-9xg64\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") " pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:56 crc kubenswrapper[4800]: I1125 15:43:56.613250 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:43:57 crc kubenswrapper[4800]: I1125 15:43:57.114033 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xg64"]
Nov 25 15:43:57 crc kubenswrapper[4800]: W1125 15:43:57.118788 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd159f31_d27c_4051_87f5_735e17f90d50.slice/crio-1040617c518e07ae4b623065211f53775f2992c96084d724a8140b4b9f3e87e5 WatchSource:0}: Error finding container 1040617c518e07ae4b623065211f53775f2992c96084d724a8140b4b9f3e87e5: Status 404 returned error can't find the container with id 1040617c518e07ae4b623065211f53775f2992c96084d724a8140b4b9f3e87e5
Nov 25 15:43:57 crc kubenswrapper[4800]: I1125 15:43:57.541393 4800 generic.go:334] "Generic (PLEG): container finished" podID="cd159f31-d27c-4051-87f5-735e17f90d50" containerID="49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365" exitCode=0
Nov 25 15:43:57 crc kubenswrapper[4800]: I1125 15:43:57.541478 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xg64" event={"ID":"cd159f31-d27c-4051-87f5-735e17f90d50","Type":"ContainerDied","Data":"49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365"}
Nov 25 15:43:57 crc kubenswrapper[4800]: I1125 15:43:57.541729 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xg64" event={"ID":"cd159f31-d27c-4051-87f5-735e17f90d50","Type":"ContainerStarted","Data":"1040617c518e07ae4b623065211f53775f2992c96084d724a8140b4b9f3e87e5"}
Nov 25 15:43:58 crc kubenswrapper[4800]: I1125 15:43:58.556537 4800 generic.go:334] "Generic (PLEG): container finished" podID="cd159f31-d27c-4051-87f5-735e17f90d50" containerID="02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7" exitCode=0
Nov 25 15:43:58 crc kubenswrapper[4800]: I1125 15:43:58.556701 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xg64" event={"ID":"cd159f31-d27c-4051-87f5-735e17f90d50","Type":"ContainerDied","Data":"02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7"}
Nov 25 15:44:00 crc kubenswrapper[4800]: I1125 15:44:00.579971 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xg64" event={"ID":"cd159f31-d27c-4051-87f5-735e17f90d50","Type":"ContainerStarted","Data":"2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2"}
Nov 25 15:44:00 crc kubenswrapper[4800]: I1125 15:44:00.616091 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9xg64" podStartSLOduration=2.7046357739999998 podStartE2EDuration="4.616070316s" podCreationTimestamp="2025-11-25 15:43:56 +0000 UTC" firstStartedPulling="2025-11-25 15:43:57.543319512 +0000 UTC m=+1598.597727994" lastFinishedPulling="2025-11-25 15:43:59.454754054 +0000 UTC m=+1600.509162536" observedRunningTime="2025-11-25 15:44:00.603485942 +0000 UTC m=+1601.657894424" watchObservedRunningTime="2025-11-25 15:44:00.616070316 +0000 UTC m=+1601.670478798"
Nov 25 15:44:06 crc kubenswrapper[4800]: I1125 15:44:06.614074 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:44:06 crc kubenswrapper[4800]: I1125 15:44:06.616021 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:44:06 crc kubenswrapper[4800]: I1125 15:44:06.668694 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:44:06 crc kubenswrapper[4800]: I1125 15:44:06.732206 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:44:06 crc kubenswrapper[4800]: I1125 15:44:06.909307 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xg64"]
Nov 25 15:44:08 crc kubenswrapper[4800]: I1125 15:44:08.670824 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9xg64" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="registry-server" containerID="cri-o://2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2" gracePeriod=2
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.152605 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.262623 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-utilities\") pod \"cd159f31-d27c-4051-87f5-735e17f90d50\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") "
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.263094 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-catalog-content\") pod \"cd159f31-d27c-4051-87f5-735e17f90d50\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") "
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.263276 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hp7xl\" (UniqueName: \"kubernetes.io/projected/cd159f31-d27c-4051-87f5-735e17f90d50-kube-api-access-hp7xl\") pod \"cd159f31-d27c-4051-87f5-735e17f90d50\" (UID: \"cd159f31-d27c-4051-87f5-735e17f90d50\") "
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.265210 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-utilities" (OuterVolumeSpecName: "utilities") pod "cd159f31-d27c-4051-87f5-735e17f90d50" (UID: "cd159f31-d27c-4051-87f5-735e17f90d50"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.272540 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd159f31-d27c-4051-87f5-735e17f90d50-kube-api-access-hp7xl" (OuterVolumeSpecName: "kube-api-access-hp7xl") pod "cd159f31-d27c-4051-87f5-735e17f90d50" (UID: "cd159f31-d27c-4051-87f5-735e17f90d50"). InnerVolumeSpecName "kube-api-access-hp7xl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.365350 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hp7xl\" (UniqueName: \"kubernetes.io/projected/cd159f31-d27c-4051-87f5-735e17f90d50-kube-api-access-hp7xl\") on node \"crc\" DevicePath \"\""
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.365390 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.585303 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd159f31-d27c-4051-87f5-735e17f90d50" (UID: "cd159f31-d27c-4051-87f5-735e17f90d50"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.671355 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd159f31-d27c-4051-87f5-735e17f90d50-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.684501 4800 generic.go:334] "Generic (PLEG): container finished" podID="cd159f31-d27c-4051-87f5-735e17f90d50" containerID="2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2" exitCode=0
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.684570 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xg64"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.684581 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xg64" event={"ID":"cd159f31-d27c-4051-87f5-735e17f90d50","Type":"ContainerDied","Data":"2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2"}
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.684656 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xg64" event={"ID":"cd159f31-d27c-4051-87f5-735e17f90d50","Type":"ContainerDied","Data":"1040617c518e07ae4b623065211f53775f2992c96084d724a8140b4b9f3e87e5"}
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.684694 4800 scope.go:117] "RemoveContainer" containerID="2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.729416 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xg64"]
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.730876 4800 scope.go:117] "RemoveContainer" containerID="02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.740359 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9xg64"]
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.756144 4800 scope.go:117] "RemoveContainer" containerID="49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.802640 4800 scope.go:117] "RemoveContainer" containerID="2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2"
Nov 25 15:44:09 crc kubenswrapper[4800]: E1125 15:44:09.803164 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2\": container with ID starting with 2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2 not found: ID does not exist" containerID="2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.803198 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2"} err="failed to get container status \"2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2\": rpc error: code = NotFound desc = could not find container \"2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2\": container with ID starting with 2d4c2624a6c6064a9ac3eba0ee5df97f8e5135eeb9fd6054e3d78f47f27ba5f2 not found: ID does not exist"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.803251 4800 scope.go:117] "RemoveContainer" containerID="02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7"
Nov 25 15:44:09 crc kubenswrapper[4800]: E1125 15:44:09.803960 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7\": container with ID starting with 02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7 not found: ID does not exist" containerID="02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.803988 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7"} err="failed to get container status \"02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7\": rpc error: code = NotFound desc = could not find container \"02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7\": container with ID starting with 02ef06b15b3f66ac1585c6b7b8142ab1b97d24ea43ad519c043e77c877a3f9a7 not found: ID does not exist"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.804005 4800 scope.go:117] "RemoveContainer" containerID="49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365"
Nov 25 15:44:09 crc kubenswrapper[4800]: E1125 15:44:09.804255 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365\": container with ID starting with 49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365 not found: ID does not exist" containerID="49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.804277 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365"} err="failed to get container status \"49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365\": rpc error: code = NotFound desc = could not find container \"49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365\": container with ID starting with 49d0cddfa2dac50a012f8ec3092285830e557f4363c204597ee629dfc7a69365 not found: ID does not exist"
Nov 25 15:44:09 crc kubenswrapper[4800]: I1125 15:44:09.804911 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" path="/var/lib/kubelet/pods/cd159f31-d27c-4051-87f5-735e17f90d50/volumes"
Nov 25 15:44:12 crc kubenswrapper[4800]: I1125 15:44:12.640690 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 15:44:12 crc kubenswrapper[4800]: I1125 15:44:12.641343 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 15:44:25 crc kubenswrapper[4800]: I1125 15:44:25.285209 4800 scope.go:117] "RemoveContainer" containerID="fc863da79b2daeddca9a76f0a902b0204dda157c4501b808f1b4fa8a7ca04c0c"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.389434 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vkrvj"]
Nov 25 15:44:28 crc kubenswrapper[4800]: E1125 15:44:28.390155 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="registry-server"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.390170 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="registry-server"
Nov 25 15:44:28 crc kubenswrapper[4800]: E1125 15:44:28.390205 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="extract-utilities"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.390211 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="extract-utilities"
Nov 25 15:44:28 crc kubenswrapper[4800]: E1125 15:44:28.390221 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="extract-content"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.390227 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="extract-content"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.390397 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd159f31-d27c-4051-87f5-735e17f90d50" containerName="registry-server"
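The burst of scope.go RemoveContainer calls followed by log.go NotFound errors above is the kubelet's idempotent cleanup: it re-issues deletion for container IDs that CRI-O has already removed, so each "ContainerStatus from runtime service failed ... NotFound" E-line in this pattern is benign noise rather than a real failure. A small sketch (again Python 3 stdlib; the pattern and function name are illustrative) for counting how often that pattern fires per container ID when triaging a log like this:

import re
import sys
from collections import Counter

# Illustrative: count how often each container ID shows up in the benign
# "RemoveContainer then NotFound" cleanup pattern logged above.
NOTFOUND_RE = re.compile(
    r'"ContainerStatus from runtime service failed".*?'
    r'code = NotFound.*?containerID="([0-9a-f]{64})"'
)

def benign_notfound_counts(log_path):
    counts = Counter()
    with open(log_path, encoding="utf-8", errors="replace") as f:
        for line in f:
            m = NOTFOUND_RE.search(line)
            if m:
                counts[m.group(1)] += 1
    return counts

if __name__ == "__main__":
    for cid, n in benign_notfound_counts(sys.argv[1]).most_common():
        print(n, cid)

For the certified-operators-9xg64 teardown above, this would report one NotFound per removed container (registry-server, extract-content, extract-utilities).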
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.391635 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.411791 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vkrvj"]
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.464235 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-utilities\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.464515 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-catalog-content\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.464575 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d427c\" (UniqueName: \"kubernetes.io/projected/9436c003-2ea7-45fc-b20c-bb902d165922-kube-api-access-d427c\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.567366 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-catalog-content\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.567455 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d427c\" (UniqueName: \"kubernetes.io/projected/9436c003-2ea7-45fc-b20c-bb902d165922-kube-api-access-d427c\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.567574 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-utilities\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.568118 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-catalog-content\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.568200 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-utilities\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.601857 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d427c\" (UniqueName: \"kubernetes.io/projected/9436c003-2ea7-45fc-b20c-bb902d165922-kube-api-access-d427c\") pod \"redhat-marketplace-vkrvj\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") " pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:28 crc kubenswrapper[4800]: I1125 15:44:28.727346 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:29 crc kubenswrapper[4800]: I1125 15:44:29.290995 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vkrvj"]
Nov 25 15:44:29 crc kubenswrapper[4800]: I1125 15:44:29.905609 4800 generic.go:334] "Generic (PLEG): container finished" podID="9436c003-2ea7-45fc-b20c-bb902d165922" containerID="4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3" exitCode=0
Nov 25 15:44:29 crc kubenswrapper[4800]: I1125 15:44:29.905674 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vkrvj" event={"ID":"9436c003-2ea7-45fc-b20c-bb902d165922","Type":"ContainerDied","Data":"4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3"}
Nov 25 15:44:29 crc kubenswrapper[4800]: I1125 15:44:29.905730 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vkrvj" event={"ID":"9436c003-2ea7-45fc-b20c-bb902d165922","Type":"ContainerStarted","Data":"9625f94cbfba5d8438028328dc3685328d1c3fa13970e6a0475cba5e3749b68b"}
Nov 25 15:44:31 crc kubenswrapper[4800]: I1125 15:44:31.932714 4800 generic.go:334] "Generic (PLEG): container finished" podID="9436c003-2ea7-45fc-b20c-bb902d165922" containerID="3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662" exitCode=0
Nov 25 15:44:31 crc kubenswrapper[4800]: I1125 15:44:31.932878 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vkrvj" event={"ID":"9436c003-2ea7-45fc-b20c-bb902d165922","Type":"ContainerDied","Data":"3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662"}
Nov 25 15:44:32 crc kubenswrapper[4800]: I1125 15:44:32.951411 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vkrvj" event={"ID":"9436c003-2ea7-45fc-b20c-bb902d165922","Type":"ContainerStarted","Data":"72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e"}
Nov 25 15:44:32 crc kubenswrapper[4800]: I1125 15:44:32.988032 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vkrvj" podStartSLOduration=2.543386356 podStartE2EDuration="4.988000238s" podCreationTimestamp="2025-11-25 15:44:28 +0000 UTC" firstStartedPulling="2025-11-25 15:44:29.908646145 +0000 UTC m=+1630.963054627" lastFinishedPulling="2025-11-25 15:44:32.353260027 +0000 UTC m=+1633.407668509" observedRunningTime="2025-11-25 15:44:32.977305597 +0000 UTC m=+1634.031714099" watchObservedRunningTime="2025-11-25 15:44:32.988000238 +0000 UTC m=+1634.042408720"
Nov 25 15:44:38 crc kubenswrapper[4800]: I1125 15:44:38.728368 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:38 crc kubenswrapper[4800]: I1125 15:44:38.729129 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:38 crc kubenswrapper[4800]: I1125 15:44:38.778766 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:39 crc kubenswrapper[4800]: I1125 15:44:39.063371 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:39 crc kubenswrapper[4800]: I1125 15:44:39.147116 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vkrvj"]
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.029399 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vkrvj" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="registry-server" containerID="cri-o://72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e" gracePeriod=2
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.437546 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.568695 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-utilities\") pod \"9436c003-2ea7-45fc-b20c-bb902d165922\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") "
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.569139 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-catalog-content\") pod \"9436c003-2ea7-45fc-b20c-bb902d165922\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") "
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.569224 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d427c\" (UniqueName: \"kubernetes.io/projected/9436c003-2ea7-45fc-b20c-bb902d165922-kube-api-access-d427c\") pod \"9436c003-2ea7-45fc-b20c-bb902d165922\" (UID: \"9436c003-2ea7-45fc-b20c-bb902d165922\") "
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.569669 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-utilities" (OuterVolumeSpecName: "utilities") pod "9436c003-2ea7-45fc-b20c-bb902d165922" (UID: "9436c003-2ea7-45fc-b20c-bb902d165922"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.582095 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9436c003-2ea7-45fc-b20c-bb902d165922-kube-api-access-d427c" (OuterVolumeSpecName: "kube-api-access-d427c") pod "9436c003-2ea7-45fc-b20c-bb902d165922" (UID: "9436c003-2ea7-45fc-b20c-bb902d165922"). InnerVolumeSpecName "kube-api-access-d427c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.586462 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9436c003-2ea7-45fc-b20c-bb902d165922" (UID: "9436c003-2ea7-45fc-b20c-bb902d165922"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.671343 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.671405 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d427c\" (UniqueName: \"kubernetes.io/projected/9436c003-2ea7-45fc-b20c-bb902d165922-kube-api-access-d427c\") on node \"crc\" DevicePath \"\""
Nov 25 15:44:41 crc kubenswrapper[4800]: I1125 15:44:41.671428 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9436c003-2ea7-45fc-b20c-bb902d165922-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.043507 4800 generic.go:334] "Generic (PLEG): container finished" podID="9436c003-2ea7-45fc-b20c-bb902d165922" containerID="72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e" exitCode=0
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.043553 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vkrvj" event={"ID":"9436c003-2ea7-45fc-b20c-bb902d165922","Type":"ContainerDied","Data":"72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e"}
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.043615 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vkrvj" event={"ID":"9436c003-2ea7-45fc-b20c-bb902d165922","Type":"ContainerDied","Data":"9625f94cbfba5d8438028328dc3685328d1c3fa13970e6a0475cba5e3749b68b"}
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.043638 4800 scope.go:117] "RemoveContainer" containerID="72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.043680 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vkrvj"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.072767 4800 scope.go:117] "RemoveContainer" containerID="3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.078901 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vkrvj"]
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.086782 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vkrvj"]
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.095670 4800 scope.go:117] "RemoveContainer" containerID="4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.155009 4800 scope.go:117] "RemoveContainer" containerID="72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e"
Nov 25 15:44:42 crc kubenswrapper[4800]: E1125 15:44:42.155691 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e\": container with ID starting with 72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e not found: ID does not exist" containerID="72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.155739 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e"} err="failed to get container status \"72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e\": rpc error: code = NotFound desc = could not find container \"72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e\": container with ID starting with 72292d76e82261c05041c5617451090f3b34f63800cb6f05ceb0fb3debd3de2e not found: ID does not exist"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.155773 4800 scope.go:117] "RemoveContainer" containerID="3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662"
Nov 25 15:44:42 crc kubenswrapper[4800]: E1125 15:44:42.156196 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662\": container with ID starting with 3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662 not found: ID does not exist" containerID="3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.156233 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662"} err="failed to get container status \"3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662\": rpc error: code = NotFound desc = could not find container \"3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662\": container with ID starting with 3266b57c11cb9d821b0eacb73643f52c13ccd274a0b3ac88754b76b6a56ca662 not found: ID does not exist"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.156257 4800 scope.go:117] "RemoveContainer" containerID="4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3"
Nov 25 15:44:42 crc kubenswrapper[4800]: E1125 15:44:42.156594 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3\": container with ID starting with 4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3 not found: ID does not exist" containerID="4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.156650 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3"} err="failed to get container status \"4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3\": rpc error: code = NotFound desc = could not find container \"4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3\": container with ID starting with 4020e8ee684bec680a7dc99b65b5b84c8566f4510fd760f7029167fe928159c3 not found: ID does not exist"
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.640016 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 15:44:42 crc kubenswrapper[4800]: I1125 15:44:42.640343 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 15:44:43 crc kubenswrapper[4800]: I1125 15:44:43.796083 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" path="/var/lib/kubelet/pods/9436c003-2ea7-45fc-b20c-bb902d165922/volumes"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.149274 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"]
Nov 25 15:45:00 crc kubenswrapper[4800]: E1125 15:45:00.150531 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="registry-server"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.150548 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="registry-server"
Nov 25 15:45:00 crc kubenswrapper[4800]: E1125 15:45:00.150580 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="extract-utilities"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.150587 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="extract-utilities"
Nov 25 15:45:00 crc kubenswrapper[4800]: E1125 15:45:00.150612 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="extract-content"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.150618 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="extract-content"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.150820 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="9436c003-2ea7-45fc-b20c-bb902d165922" containerName="registry-server"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.151601 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.153810 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.155522 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.165872 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"]
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.303075 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-secret-volume\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.303144 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-config-volume\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.303196 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj6dg\" (UniqueName: \"kubernetes.io/projected/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-kube-api-access-rj6dg\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.405799 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-secret-volume\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.405893 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-config-volume\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.405956 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj6dg\" (UniqueName: \"kubernetes.io/projected/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-kube-api-access-rj6dg\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.406869 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-config-volume\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.420826 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-secret-volume\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.426361 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj6dg\" (UniqueName: \"kubernetes.io/projected/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-kube-api-access-rj6dg\") pod \"collect-profiles-29401425-s5hzh\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.473900 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:00 crc kubenswrapper[4800]: I1125 15:45:00.956557 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"]
Nov 25 15:45:01 crc kubenswrapper[4800]: I1125 15:45:01.232161 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh" event={"ID":"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0","Type":"ContainerStarted","Data":"ea4f67d91d203956267c7b693ba8201eda9a2a3bd866c0c1d0b079997ea8342a"}
Nov 25 15:45:01 crc kubenswrapper[4800]: I1125 15:45:01.232207 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh" event={"ID":"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0","Type":"ContainerStarted","Data":"5ccdb8085aef64d4af6416ae6e0e46973a7b28535212f38bd5bd66cb3438d801"}
Nov 25 15:45:01 crc kubenswrapper[4800]: I1125 15:45:01.258502 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh" podStartSLOduration=1.258479866 podStartE2EDuration="1.258479866s" podCreationTimestamp="2025-11-25 15:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 15:45:01.256899532 +0000 UTC m=+1662.311308094" watchObservedRunningTime="2025-11-25 15:45:01.258479866 +0000 UTC m=+1662.312888358"
Nov 25 15:45:02 crc kubenswrapper[4800]: I1125 15:45:02.241655 4800 generic.go:334] "Generic (PLEG): container finished" podID="2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" containerID="ea4f67d91d203956267c7b693ba8201eda9a2a3bd866c0c1d0b079997ea8342a" exitCode=0
Nov 25 15:45:02 crc kubenswrapper[4800]: I1125 15:45:02.241718 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh" event={"ID":"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0","Type":"ContainerDied","Data":"ea4f67d91d203956267c7b693ba8201eda9a2a3bd866c0c1d0b079997ea8342a"}
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.544798 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.671662 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-secret-volume\") pod \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") "
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.671721 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rj6dg\" (UniqueName: \"kubernetes.io/projected/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-kube-api-access-rj6dg\") pod \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") "
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.671962 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-config-volume\") pod \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\" (UID: \"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0\") "
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.672682 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-config-volume" (OuterVolumeSpecName: "config-volume") pod "2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" (UID: "2fe623fd-f427-45ce-a7fd-bb9d5f0062f0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.676900 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-kube-api-access-rj6dg" (OuterVolumeSpecName: "kube-api-access-rj6dg") pod "2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" (UID: "2fe623fd-f427-45ce-a7fd-bb9d5f0062f0"). InnerVolumeSpecName "kube-api-access-rj6dg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.677943 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" (UID: "2fe623fd-f427-45ce-a7fd-bb9d5f0062f0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.774767 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.774815 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rj6dg\" (UniqueName: \"kubernetes.io/projected/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-kube-api-access-rj6dg\") on node \"crc\" DevicePath \"\""
Nov 25 15:45:03 crc kubenswrapper[4800]: I1125 15:45:03.774827 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 15:45:04 crc kubenswrapper[4800]: I1125 15:45:04.262889 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh" event={"ID":"2fe623fd-f427-45ce-a7fd-bb9d5f0062f0","Type":"ContainerDied","Data":"5ccdb8085aef64d4af6416ae6e0e46973a7b28535212f38bd5bd66cb3438d801"}
Nov 25 15:45:04 crc kubenswrapper[4800]: I1125 15:45:04.262946 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ccdb8085aef64d4af6416ae6e0e46973a7b28535212f38bd5bd66cb3438d801"
Nov 25 15:45:04 crc kubenswrapper[4800]: I1125 15:45:04.263024 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"
Nov 25 15:45:12 crc kubenswrapper[4800]: I1125 15:45:12.639729 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 15:45:12 crc kubenswrapper[4800]: I1125 15:45:12.641420 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 15:45:12 crc kubenswrapper[4800]: I1125 15:45:12.641531 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 15:45:12 crc kubenswrapper[4800]: I1125 15:45:12.642346 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 15:45:12 crc kubenswrapper[4800]: I1125 15:45:12.642490 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" gracePeriod=600
Nov 25 15:45:12 crc kubenswrapper[4800]: E1125 15:45:12.776568 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:45:13 crc kubenswrapper[4800]: I1125 15:45:13.357194 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" exitCode=0
Nov 25 15:45:13 crc kubenswrapper[4800]: I1125 15:45:13.357244 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"}
Nov 25 15:45:13 crc kubenswrapper[4800]: I1125 15:45:13.357284 4800 scope.go:117] "RemoveContainer" containerID="b8afc7cca40a5009587f2c6768805585b09b1bfca0b79d34753356c624725482"
Nov 25 15:45:13 crc kubenswrapper[4800]: I1125 15:45:13.358107 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:45:13 crc kubenswrapper[4800]: E1125 15:45:13.358475 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:45:25 crc kubenswrapper[4800]: I1125 15:45:25.391277 4800 scope.go:117] "RemoveContainer" containerID="f672a7685b2cda3a31dc2c11becc300404c3ad83504d7221692a52ca0723354a"
Nov 25 15:45:25 crc kubenswrapper[4800]: I1125 15:45:25.429514 4800 scope.go:117] "RemoveContainer" containerID="4841443f209fa509e5de46b7e1e5425f75fb687ac606a5fc0b1d36c07dab811f"
Nov 25 15:45:25 crc kubenswrapper[4800]: I1125 15:45:25.505904 4800 scope.go:117] "RemoveContainer" containerID="acd0b4ebc8a0b83be0223d88fc94eec701c5104bbfffc21a72c1d40dde269d66"
Nov 25 15:45:28 crc kubenswrapper[4800]: I1125 15:45:28.785680 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:45:28 crc kubenswrapper[4800]: E1125 15:45:28.786279 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:45:41 crc kubenswrapper[4800]: I1125 15:45:41.786331 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:45:41 crc kubenswrapper[4800]: E1125 15:45:41.787156 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon
pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:45:54 crc kubenswrapper[4800]: I1125 15:45:54.785651 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:45:54 crc kubenswrapper[4800]: E1125 15:45:54.788399 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:46:07 crc kubenswrapper[4800]: I1125 15:46:07.786417 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:46:07 crc kubenswrapper[4800]: E1125 15:46:07.787217 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:46:18 crc kubenswrapper[4800]: I1125 15:46:18.786891 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:46:18 crc kubenswrapper[4800]: E1125 15:46:18.788052 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:46:30 crc kubenswrapper[4800]: I1125 15:46:30.785680 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:46:30 crc kubenswrapper[4800]: E1125 15:46:30.786669 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:46:45 crc kubenswrapper[4800]: I1125 15:46:45.245580 4800 generic.go:334] "Generic (PLEG): container finished" podID="32fce522-1642-4591-8201-17f0fdc8b096" containerID="e7b2915863a018baf9090c0f34f8cdd272994f7fd4e107b64a973a1ff14e72fb" exitCode=0 Nov 25 15:46:45 crc kubenswrapper[4800]: I1125 15:46:45.245668 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" event={"ID":"32fce522-1642-4591-8201-17f0fdc8b096","Type":"ContainerDied","Data":"e7b2915863a018baf9090c0f34f8cdd272994f7fd4e107b64a973a1ff14e72fb"} Nov 25 15:46:45 crc kubenswrapper[4800]: I1125 
15:46:45.786587 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:46:45 crc kubenswrapper[4800]: E1125 15:46:45.787157 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.773869 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.961787 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-ssh-key\") pod \"32fce522-1642-4591-8201-17f0fdc8b096\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.961961 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6r7z9\" (UniqueName: \"kubernetes.io/projected/32fce522-1642-4591-8201-17f0fdc8b096-kube-api-access-6r7z9\") pod \"32fce522-1642-4591-8201-17f0fdc8b096\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.962062 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-bootstrap-combined-ca-bundle\") pod \"32fce522-1642-4591-8201-17f0fdc8b096\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.962133 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-inventory\") pod \"32fce522-1642-4591-8201-17f0fdc8b096\" (UID: \"32fce522-1642-4591-8201-17f0fdc8b096\") " Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.968478 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "32fce522-1642-4591-8201-17f0fdc8b096" (UID: "32fce522-1642-4591-8201-17f0fdc8b096"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.968667 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32fce522-1642-4591-8201-17f0fdc8b096-kube-api-access-6r7z9" (OuterVolumeSpecName: "kube-api-access-6r7z9") pod "32fce522-1642-4591-8201-17f0fdc8b096" (UID: "32fce522-1642-4591-8201-17f0fdc8b096"). InnerVolumeSpecName "kube-api-access-6r7z9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.992212 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-inventory" (OuterVolumeSpecName: "inventory") pod "32fce522-1642-4591-8201-17f0fdc8b096" (UID: "32fce522-1642-4591-8201-17f0fdc8b096"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:46:46 crc kubenswrapper[4800]: I1125 15:46:46.995337 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "32fce522-1642-4591-8201-17f0fdc8b096" (UID: "32fce522-1642-4591-8201-17f0fdc8b096"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.064624 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.064664 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6r7z9\" (UniqueName: \"kubernetes.io/projected/32fce522-1642-4591-8201-17f0fdc8b096-kube-api-access-6r7z9\") on node \"crc\" DevicePath \"\"" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.064676 4800 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.064687 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fce522-1642-4591-8201-17f0fdc8b096-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.266139 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" event={"ID":"32fce522-1642-4591-8201-17f0fdc8b096","Type":"ContainerDied","Data":"5179508b3c044b61ffb17b638f7b31ef2fa4f0b0f4c2094011f549a0b34568ed"} Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.266192 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5179508b3c044b61ffb17b638f7b31ef2fa4f0b0f4c2094011f549a0b34568ed" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.266267 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.374741 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q"] Nov 25 15:46:47 crc kubenswrapper[4800]: E1125 15:46:47.375114 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" containerName="collect-profiles" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.375127 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" containerName="collect-profiles" Nov 25 15:46:47 crc kubenswrapper[4800]: E1125 15:46:47.375160 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32fce522-1642-4591-8201-17f0fdc8b096" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.375169 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="32fce522-1642-4591-8201-17f0fdc8b096" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.375337 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" containerName="collect-profiles" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.375356 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="32fce522-1642-4591-8201-17f0fdc8b096" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.375958 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.382343 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.386555 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.390762 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.390837 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.394431 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q"] Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.577277 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.577354 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqplw\" (UniqueName: \"kubernetes.io/projected/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-kube-api-access-cqplw\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: 
\"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.577410 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.679980 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.680326 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.680404 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqplw\" (UniqueName: \"kubernetes.io/projected/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-kube-api-access-cqplw\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.684340 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.687406 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:47 crc kubenswrapper[4800]: I1125 15:46:47.708287 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqplw\" (UniqueName: \"kubernetes.io/projected/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-kube-api-access-cqplw\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:48 crc kubenswrapper[4800]: I1125 15:46:48.000139 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:46:48 crc kubenswrapper[4800]: I1125 15:46:48.581826 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q"] Nov 25 15:46:48 crc kubenswrapper[4800]: I1125 15:46:48.583298 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 15:46:49 crc kubenswrapper[4800]: I1125 15:46:49.290883 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" event={"ID":"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b","Type":"ContainerStarted","Data":"2d0481ed397f79eb354baee0d9b0555eeda096466f7ee09cfa28e5b87a2d9cdc"} Nov 25 15:46:50 crc kubenswrapper[4800]: I1125 15:46:50.301442 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" event={"ID":"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b","Type":"ContainerStarted","Data":"00e11517fbfb4bb44a4dc83fc317b10420340fd1db8748869728b3b9db636b88"} Nov 25 15:46:50 crc kubenswrapper[4800]: I1125 15:46:50.329222 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" podStartSLOduration=2.802046384 podStartE2EDuration="3.329198655s" podCreationTimestamp="2025-11-25 15:46:47 +0000 UTC" firstStartedPulling="2025-11-25 15:46:48.583039326 +0000 UTC m=+1769.637447808" lastFinishedPulling="2025-11-25 15:46:49.110191597 +0000 UTC m=+1770.164600079" observedRunningTime="2025-11-25 15:46:50.316704604 +0000 UTC m=+1771.371113086" watchObservedRunningTime="2025-11-25 15:46:50.329198655 +0000 UTC m=+1771.383607137" Nov 25 15:46:59 crc kubenswrapper[4800]: I1125 15:46:59.793758 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:46:59 crc kubenswrapper[4800]: E1125 15:46:59.794302 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:47:11 crc kubenswrapper[4800]: I1125 15:47:11.786985 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:47:11 crc kubenswrapper[4800]: E1125 15:47:11.787870 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:47:26 crc kubenswrapper[4800]: I1125 15:47:26.785434 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:47:26 crc kubenswrapper[4800]: E1125 15:47:26.786538 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.041245 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-a0ce-account-create-b766g"] Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.053087 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-99n98"] Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.066386 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-96nfb"] Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.075665 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-d636-account-create-6fnz8"] Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.082739 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-99n98"] Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.092907 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-a0ce-account-create-b766g"] Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.117147 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-d636-account-create-6fnz8"] Nov 25 15:47:34 crc kubenswrapper[4800]: I1125 15:47:34.126652 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-96nfb"] Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.033414 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-4503-account-create-mqjsr"] Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.041966 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-w5nq9"] Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.050385 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-w5nq9"] Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.061059 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-4503-account-create-mqjsr"] Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.796101 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="079c90b2-0054-4e36-8836-1e490ce9203c" path="/var/lib/kubelet/pods/079c90b2-0054-4e36-8836-1e490ce9203c/volumes" Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.797093 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3699aa7f-d6e9-45ea-8988-51ab0811f43c" path="/var/lib/kubelet/pods/3699aa7f-d6e9-45ea-8988-51ab0811f43c/volumes" Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.797582 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="780bb63d-407c-41c6-8dc0-6e03a4b904fd" path="/var/lib/kubelet/pods/780bb63d-407c-41c6-8dc0-6e03a4b904fd/volumes" Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.798078 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0bbad3a-0877-42a9-9b3b-8102f399768d" path="/var/lib/kubelet/pods/a0bbad3a-0877-42a9-9b3b-8102f399768d/volumes" Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 15:47:35.799101 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4cb5047-76f0-42c5-91a3-24cf5274f77b" path="/var/lib/kubelet/pods/b4cb5047-76f0-42c5-91a3-24cf5274f77b/volumes" Nov 25 15:47:35 crc kubenswrapper[4800]: I1125 
15:47:35.799595 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5212761-3a01-4a92-92bd-bb4f82a0d011" path="/var/lib/kubelet/pods/c5212761-3a01-4a92-92bd-bb4f82a0d011/volumes" Nov 25 15:47:39 crc kubenswrapper[4800]: I1125 15:47:39.792380 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:47:39 crc kubenswrapper[4800]: E1125 15:47:39.792939 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:47:52 crc kubenswrapper[4800]: I1125 15:47:52.786635 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:47:52 crc kubenswrapper[4800]: E1125 15:47:52.787988 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:47:58 crc kubenswrapper[4800]: I1125 15:47:58.933717 4800 generic.go:334] "Generic (PLEG): container finished" podID="b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" containerID="00e11517fbfb4bb44a4dc83fc317b10420340fd1db8748869728b3b9db636b88" exitCode=0 Nov 25 15:47:58 crc kubenswrapper[4800]: I1125 15:47:58.933798 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" event={"ID":"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b","Type":"ContainerDied","Data":"00e11517fbfb4bb44a4dc83fc317b10420340fd1db8748869728b3b9db636b88"} Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.327060 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.420159 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqplw\" (UniqueName: \"kubernetes.io/projected/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-kube-api-access-cqplw\") pod \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.420370 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-inventory\") pod \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.420450 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-ssh-key\") pod \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\" (UID: \"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b\") " Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.426007 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-kube-api-access-cqplw" (OuterVolumeSpecName: "kube-api-access-cqplw") pod "b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" (UID: "b8ed30c2-c85a-41d7-be2b-fa9dfe81547b"). InnerVolumeSpecName "kube-api-access-cqplw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.446962 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-inventory" (OuterVolumeSpecName: "inventory") pod "b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" (UID: "b8ed30c2-c85a-41d7-be2b-fa9dfe81547b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.449289 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" (UID: "b8ed30c2-c85a-41d7-be2b-fa9dfe81547b"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.523700 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.523963 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqplw\" (UniqueName: \"kubernetes.io/projected/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-kube-api-access-cqplw\") on node \"crc\" DevicePath \"\"" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.524045 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.954257 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" event={"ID":"b8ed30c2-c85a-41d7-be2b-fa9dfe81547b","Type":"ContainerDied","Data":"2d0481ed397f79eb354baee0d9b0555eeda096466f7ee09cfa28e5b87a2d9cdc"} Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.954565 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d0481ed397f79eb354baee0d9b0555eeda096466f7ee09cfa28e5b87a2d9cdc" Nov 25 15:48:00 crc kubenswrapper[4800]: I1125 15:48:00.954369 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.124801 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s"] Nov 25 15:48:01 crc kubenswrapper[4800]: E1125 15:48:01.125572 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.125633 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.125935 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.126885 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.131649 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.132358 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.132969 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.133111 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.137703 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s"] Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.236872 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s28t2\" (UniqueName: \"kubernetes.io/projected/713eb615-1098-49ed-9749-50ba1822b159-kube-api-access-s28t2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.236985 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.237517 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.339519 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s28t2\" (UniqueName: \"kubernetes.io/projected/713eb615-1098-49ed-9749-50ba1822b159-kube-api-access-s28t2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.339628 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.339665 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.349309 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.349645 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.362149 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s28t2\" (UniqueName: \"kubernetes.io/projected/713eb615-1098-49ed-9749-50ba1822b159-kube-api-access-s28t2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mc24s\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.447305 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.943160 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s"] Nov 25 15:48:01 crc kubenswrapper[4800]: I1125 15:48:01.963019 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" event={"ID":"713eb615-1098-49ed-9749-50ba1822b159","Type":"ContainerStarted","Data":"e10c76994e56af8562b47590629c18137f5d7241cbf534fed3bb01701a4be311"} Nov 25 15:48:02 crc kubenswrapper[4800]: I1125 15:48:02.976122 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" event={"ID":"713eb615-1098-49ed-9749-50ba1822b159","Type":"ContainerStarted","Data":"5eb27ce4a3611659cfd85bece4dcd20e3256ae3071bd869d0137211a0ff3a1e6"} Nov 25 15:48:03 crc kubenswrapper[4800]: I1125 15:48:03.004355 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" podStartSLOduration=1.339584117 podStartE2EDuration="2.004322321s" podCreationTimestamp="2025-11-25 15:48:01 +0000 UTC" firstStartedPulling="2025-11-25 15:48:01.950390441 +0000 UTC m=+1843.004798923" lastFinishedPulling="2025-11-25 15:48:02.615128645 +0000 UTC m=+1843.669537127" observedRunningTime="2025-11-25 15:48:02.999249543 +0000 UTC m=+1844.053658035" watchObservedRunningTime="2025-11-25 15:48:03.004322321 +0000 UTC m=+1844.058730813" Nov 25 15:48:04 crc kubenswrapper[4800]: I1125 15:48:04.786357 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:48:04 crc kubenswrapper[4800]: E1125 15:48:04.788196 4800 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.053331 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-6e3d-account-create-pkf44"] Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.064773 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-h5lb7"] Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.080954 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-2ffe-account-create-cxs5m"] Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.090480 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-2ffe-account-create-cxs5m"] Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.100533 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-6e3d-account-create-pkf44"] Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.108183 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-bcj2h"] Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.116516 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-h5lb7"] Nov 25 15:48:06 crc kubenswrapper[4800]: I1125 15:48:06.125127 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-bcj2h"] Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.037060 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-thkrz"] Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.045080 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-0885-account-create-jgg7v"] Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.052892 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-thkrz"] Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.061872 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-0885-account-create-jgg7v"] Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.796059 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="365fdcd1-ee57-49e4-819f-e8c567c99001" path="/var/lib/kubelet/pods/365fdcd1-ee57-49e4-819f-e8c567c99001/volumes" Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.797161 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68fc1e6f-3787-4d76-9ed2-701a8170a037" path="/var/lib/kubelet/pods/68fc1e6f-3787-4d76-9ed2-701a8170a037/volumes" Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.797702 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78f8b613-903f-45b0-bf62-176546fd4f72" path="/var/lib/kubelet/pods/78f8b613-903f-45b0-bf62-176546fd4f72/volumes" Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.798198 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="868262b9-c453-46a0-8885-1ccf13e06e98" path="/var/lib/kubelet/pods/868262b9-c453-46a0-8885-1ccf13e06e98/volumes" Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.799293 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da401e63-0bdb-4057-844e-c5938c5d9a98" 
path="/var/lib/kubelet/pods/da401e63-0bdb-4057-844e-c5938c5d9a98/volumes" Nov 25 15:48:07 crc kubenswrapper[4800]: I1125 15:48:07.799810 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb6483f1-4969-48cf-814f-ecdd47c261ec" path="/var/lib/kubelet/pods/fb6483f1-4969-48cf-814f-ecdd47c261ec/volumes" Nov 25 15:48:08 crc kubenswrapper[4800]: I1125 15:48:08.028500 4800 generic.go:334] "Generic (PLEG): container finished" podID="713eb615-1098-49ed-9749-50ba1822b159" containerID="5eb27ce4a3611659cfd85bece4dcd20e3256ae3071bd869d0137211a0ff3a1e6" exitCode=0 Nov 25 15:48:08 crc kubenswrapper[4800]: I1125 15:48:08.028547 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" event={"ID":"713eb615-1098-49ed-9749-50ba1822b159","Type":"ContainerDied","Data":"5eb27ce4a3611659cfd85bece4dcd20e3256ae3071bd869d0137211a0ff3a1e6"} Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.450992 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.600774 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-inventory\") pod \"713eb615-1098-49ed-9749-50ba1822b159\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.601364 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s28t2\" (UniqueName: \"kubernetes.io/projected/713eb615-1098-49ed-9749-50ba1822b159-kube-api-access-s28t2\") pod \"713eb615-1098-49ed-9749-50ba1822b159\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.601509 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key\") pod \"713eb615-1098-49ed-9749-50ba1822b159\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") " Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.613106 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/713eb615-1098-49ed-9749-50ba1822b159-kube-api-access-s28t2" (OuterVolumeSpecName: "kube-api-access-s28t2") pod "713eb615-1098-49ed-9749-50ba1822b159" (UID: "713eb615-1098-49ed-9749-50ba1822b159"). InnerVolumeSpecName "kube-api-access-s28t2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:48:09 crc kubenswrapper[4800]: E1125 15:48:09.626373 4800 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key podName:713eb615-1098-49ed-9749-50ba1822b159 nodeName:}" failed. No retries permitted until 2025-11-25 15:48:10.126344537 +0000 UTC m=+1851.180753019 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "ssh-key" (UniqueName: "kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key") pod "713eb615-1098-49ed-9749-50ba1822b159" (UID: "713eb615-1098-49ed-9749-50ba1822b159") : error deleting /var/lib/kubelet/pods/713eb615-1098-49ed-9749-50ba1822b159/volume-subpaths: remove /var/lib/kubelet/pods/713eb615-1098-49ed-9749-50ba1822b159/volume-subpaths: no such file or directory Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.628740 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-inventory" (OuterVolumeSpecName: "inventory") pod "713eb615-1098-49ed-9749-50ba1822b159" (UID: "713eb615-1098-49ed-9749-50ba1822b159"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.703555 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:48:09 crc kubenswrapper[4800]: I1125 15:48:09.703588 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s28t2\" (UniqueName: \"kubernetes.io/projected/713eb615-1098-49ed-9749-50ba1822b159-kube-api-access-s28t2\") on node \"crc\" DevicePath \"\"" Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.053026 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" event={"ID":"713eb615-1098-49ed-9749-50ba1822b159","Type":"ContainerDied","Data":"e10c76994e56af8562b47590629c18137f5d7241cbf534fed3bb01701a4be311"} Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.053070 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e10c76994e56af8562b47590629c18137f5d7241cbf534fed3bb01701a4be311" Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.053230 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s" Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.127095 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"] Nov 25 15:48:10 crc kubenswrapper[4800]: E1125 15:48:10.127483 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="713eb615-1098-49ed-9749-50ba1822b159" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.127500 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="713eb615-1098-49ed-9749-50ba1822b159" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.127692 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="713eb615-1098-49ed-9749-50ba1822b159" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.128407 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.143695 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"]
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.213985 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key\") pod \"713eb615-1098-49ed-9749-50ba1822b159\" (UID: \"713eb615-1098-49ed-9749-50ba1822b159\") "
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.214511 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.214605 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v9jn\" (UniqueName: \"kubernetes.io/projected/37c204e4-e594-4a65-bdee-67202c8847fd-kube-api-access-5v9jn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.215226 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.218525 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "713eb615-1098-49ed-9749-50ba1822b159" (UID: "713eb615-1098-49ed-9749-50ba1822b159"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.317897 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.317989 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.318057 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v9jn\" (UniqueName: \"kubernetes.io/projected/37c204e4-e594-4a65-bdee-67202c8847fd-kube-api-access-5v9jn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.318178 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/713eb615-1098-49ed-9749-50ba1822b159-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.322345 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.322356 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.338957 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v9jn\" (UniqueName: \"kubernetes.io/projected/37c204e4-e594-4a65-bdee-67202c8847fd-kube-api-access-5v9jn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-bt745\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:10 crc kubenswrapper[4800]: I1125 15:48:10.457834 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:11 crc kubenswrapper[4800]: I1125 15:48:11.007678 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"]
Nov 25 15:48:11 crc kubenswrapper[4800]: I1125 15:48:11.068473 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745" event={"ID":"37c204e4-e594-4a65-bdee-67202c8847fd","Type":"ContainerStarted","Data":"d0da80526dc85e8d784405a6af068d2032d5ebe8a79bff0500e064e7f07206d1"}
Nov 25 15:48:12 crc kubenswrapper[4800]: I1125 15:48:12.083169 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745" event={"ID":"37c204e4-e594-4a65-bdee-67202c8847fd","Type":"ContainerStarted","Data":"e2903086cdacced893f5a7ba71cd01154d02500d46e9bd8e45a54705ea17a547"}
Nov 25 15:48:12 crc kubenswrapper[4800]: I1125 15:48:12.112147 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745" podStartSLOduration=1.5066683950000002 podStartE2EDuration="2.112122698s" podCreationTimestamp="2025-11-25 15:48:10 +0000 UTC" firstStartedPulling="2025-11-25 15:48:11.024162783 +0000 UTC m=+1852.078571265" lastFinishedPulling="2025-11-25 15:48:11.629617076 +0000 UTC m=+1852.684025568" observedRunningTime="2025-11-25 15:48:12.106183547 +0000 UTC m=+1853.160592049" watchObservedRunningTime="2025-11-25 15:48:12.112122698 +0000 UTC m=+1853.166531180"
Nov 25 15:48:17 crc kubenswrapper[4800]: I1125 15:48:17.786125 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:48:17 crc kubenswrapper[4800]: E1125 15:48:17.787506 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.654907 4800 scope.go:117] "RemoveContainer" containerID="d4f6867f6fd82b227b474d6639a64646d69b0dbc982429ad7c5d7b85e6479182"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.686250 4800 scope.go:117] "RemoveContainer" containerID="6e07c0061071d76abda106c98f11d021f3e91e6206bf99d831012d7ea89fadce"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.741019 4800 scope.go:117] "RemoveContainer" containerID="30f971832485fc6ef3b5d63804e2e985284521908da238b4b61ee1433eea5aad"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.790112 4800 scope.go:117] "RemoveContainer" containerID="b0192bdaf22666e1dd439e8e12b5ff92088a8bbd38dc90a0fefe34c7f11acc18"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.824039 4800 scope.go:117] "RemoveContainer" containerID="28aeb7e8e7eb56c3ea859debe777a996479d8f06bd882c093544ff345e179e48"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.876782 4800 scope.go:117] "RemoveContainer" containerID="341352fd362a312240ea7d66abc5d59fabb822289e87582d2ac39b2b43684416"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.903479 4800 scope.go:117] "RemoveContainer" containerID="1efd704f7973837c024f03c126dd4e14e00c8a74d40bf0840bd49fe31d525108"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.933064 4800 scope.go:117] "RemoveContainer" containerID="9efae3f313f62828f4f2eacf5d2fc025c4c6e1168df51c56a70b5f3dd1c4bb19"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.959743 4800 scope.go:117] "RemoveContainer" containerID="e2784e7b40020c7a4b81decfe3bbf46edfbad8fd457ae2c0f009e7d54ce6e5e1"
Nov 25 15:48:25 crc kubenswrapper[4800]: I1125 15:48:25.996371 4800 scope.go:117] "RemoveContainer" containerID="5cd6f865f54c020b55cdab91d2a1c4e5fb69be0675585cf2034baf9517c73651"
Nov 25 15:48:26 crc kubenswrapper[4800]: I1125 15:48:26.015907 4800 scope.go:117] "RemoveContainer" containerID="3e420f4524fa3a852175ce114a113cb22785f6ac820fef1f7518b2d1c95ba4d6"
Nov 25 15:48:26 crc kubenswrapper[4800]: I1125 15:48:26.053189 4800 scope.go:117] "RemoveContainer" containerID="d7f44252bcb12cfbfc1bfa560bdecbece3809b214ff9c5678657b9d0ee0446fa"
Nov 25 15:48:29 crc kubenswrapper[4800]: I1125 15:48:29.793408 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:48:29 crc kubenswrapper[4800]: E1125 15:48:29.794211 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:48:33 crc kubenswrapper[4800]: I1125 15:48:33.052342 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-p9448"]
Nov 25 15:48:33 crc kubenswrapper[4800]: I1125 15:48:33.066079 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-p9448"]
Nov 25 15:48:33 crc kubenswrapper[4800]: I1125 15:48:33.804888 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb9722e1-2a72-4b42-a605-4e5476890d27" path="/var/lib/kubelet/pods/fb9722e1-2a72-4b42-a605-4e5476890d27/volumes"
Nov 25 15:48:39 crc kubenswrapper[4800]: I1125 15:48:39.027164 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-fxh9k"]
Nov 25 15:48:39 crc kubenswrapper[4800]: I1125 15:48:39.034157 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-fxh9k"]
Nov 25 15:48:39 crc kubenswrapper[4800]: I1125 15:48:39.795909 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="861a549f-5373-4d45-befd-3859dbfdc705" path="/var/lib/kubelet/pods/861a549f-5373-4d45-befd-3859dbfdc705/volumes"
Nov 25 15:48:41 crc kubenswrapper[4800]: I1125 15:48:41.786095 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:48:41 crc kubenswrapper[4800]: E1125 15:48:41.787170 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:48:49 crc kubenswrapper[4800]: I1125 15:48:49.432658 4800 generic.go:334] "Generic (PLEG): container finished" podID="37c204e4-e594-4a65-bdee-67202c8847fd" containerID="e2903086cdacced893f5a7ba71cd01154d02500d46e9bd8e45a54705ea17a547" exitCode=0
Nov 25 15:48:49 crc kubenswrapper[4800]: I1125 15:48:49.432755 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745" event={"ID":"37c204e4-e594-4a65-bdee-67202c8847fd","Type":"ContainerDied","Data":"e2903086cdacced893f5a7ba71cd01154d02500d46e9bd8e45a54705ea17a547"}
Nov 25 15:48:50 crc kubenswrapper[4800]: I1125 15:48:50.884612 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:50 crc kubenswrapper[4800]: I1125 15:48:50.933265 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5v9jn\" (UniqueName: \"kubernetes.io/projected/37c204e4-e594-4a65-bdee-67202c8847fd-kube-api-access-5v9jn\") pod \"37c204e4-e594-4a65-bdee-67202c8847fd\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") "
Nov 25 15:48:50 crc kubenswrapper[4800]: I1125 15:48:50.933461 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-ssh-key\") pod \"37c204e4-e594-4a65-bdee-67202c8847fd\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") "
Nov 25 15:48:50 crc kubenswrapper[4800]: I1125 15:48:50.933504 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-inventory\") pod \"37c204e4-e594-4a65-bdee-67202c8847fd\" (UID: \"37c204e4-e594-4a65-bdee-67202c8847fd\") "
Nov 25 15:48:50 crc kubenswrapper[4800]: I1125 15:48:50.945095 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37c204e4-e594-4a65-bdee-67202c8847fd-kube-api-access-5v9jn" (OuterVolumeSpecName: "kube-api-access-5v9jn") pod "37c204e4-e594-4a65-bdee-67202c8847fd" (UID: "37c204e4-e594-4a65-bdee-67202c8847fd"). InnerVolumeSpecName "kube-api-access-5v9jn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:48:50 crc kubenswrapper[4800]: I1125 15:48:50.971264 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "37c204e4-e594-4a65-bdee-67202c8847fd" (UID: "37c204e4-e594-4a65-bdee-67202c8847fd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:48:50 crc kubenswrapper[4800]: I1125 15:48:50.971718 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-inventory" (OuterVolumeSpecName: "inventory") pod "37c204e4-e594-4a65-bdee-67202c8847fd" (UID: "37c204e4-e594-4a65-bdee-67202c8847fd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.040105 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.040148 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37c204e4-e594-4a65-bdee-67202c8847fd-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.040158 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5v9jn\" (UniqueName: \"kubernetes.io/projected/37c204e4-e594-4a65-bdee-67202c8847fd-kube-api-access-5v9jn\") on node \"crc\" DevicePath \"\""
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.452576 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745" event={"ID":"37c204e4-e594-4a65-bdee-67202c8847fd","Type":"ContainerDied","Data":"d0da80526dc85e8d784405a6af068d2032d5ebe8a79bff0500e064e7f07206d1"}
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.452616 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0da80526dc85e8d784405a6af068d2032d5ebe8a79bff0500e064e7f07206d1"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.452624 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.547539 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"]
Nov 25 15:48:51 crc kubenswrapper[4800]: E1125 15:48:51.548046 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37c204e4-e594-4a65-bdee-67202c8847fd" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.548073 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="37c204e4-e594-4a65-bdee-67202c8847fd" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.548335 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="37c204e4-e594-4a65-bdee-67202c8847fd" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.549228 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.557589 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.557632 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.557589 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.557707 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.558139 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"]
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.651121 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pgq5\" (UniqueName: \"kubernetes.io/projected/0db87c6c-7306-4a1f-814f-312ff0ff1361-kube-api-access-7pgq5\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.651199 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.651236 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.753694 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pgq5\" (UniqueName: \"kubernetes.io/projected/0db87c6c-7306-4a1f-814f-312ff0ff1361-kube-api-access-7pgq5\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.753762 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.753786 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.758201 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.759554 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.773360 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pgq5\" (UniqueName: \"kubernetes.io/projected/0db87c6c-7306-4a1f-814f-312ff0ff1361-kube-api-access-7pgq5\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:51 crc kubenswrapper[4800]: I1125 15:48:51.873988 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:48:52 crc kubenswrapper[4800]: I1125 15:48:52.377280 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"]
Nov 25 15:48:52 crc kubenswrapper[4800]: I1125 15:48:52.460652 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s" event={"ID":"0db87c6c-7306-4a1f-814f-312ff0ff1361","Type":"ContainerStarted","Data":"0cac32a8a26d87c23bd10c5197eb058ffa75aa0c9b75dbd41573e25ad43d65a1"}
Nov 25 15:48:52 crc kubenswrapper[4800]: I1125 15:48:52.786130 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:48:52 crc kubenswrapper[4800]: E1125 15:48:52.786437 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:48:54 crc kubenswrapper[4800]: I1125 15:48:54.497193 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s" event={"ID":"0db87c6c-7306-4a1f-814f-312ff0ff1361","Type":"ContainerStarted","Data":"9c23e89d08682ad42319a20ee664c7e05cdd3a5a4a12c1cd1fe8addd8f317e9c"}
Nov 25 15:48:54 crc kubenswrapper[4800]: I1125 15:48:54.515074 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s" podStartSLOduration=1.854456024 podStartE2EDuration="3.515050881s" podCreationTimestamp="2025-11-25 15:48:51 +0000 UTC" firstStartedPulling="2025-11-25 15:48:52.381976423 +0000 UTC m=+1893.436384905" lastFinishedPulling="2025-11-25 15:48:54.04257128 +0000 UTC m=+1895.096979762" observedRunningTime="2025-11-25 15:48:54.512808899 +0000 UTC m=+1895.567217381" watchObservedRunningTime="2025-11-25 15:48:54.515050881 +0000 UTC m=+1895.569459363"
Nov 25 15:48:58 crc kubenswrapper[4800]: I1125 15:48:58.538204 4800 generic.go:334] "Generic (PLEG): container finished" podID="0db87c6c-7306-4a1f-814f-312ff0ff1361" containerID="9c23e89d08682ad42319a20ee664c7e05cdd3a5a4a12c1cd1fe8addd8f317e9c" exitCode=0
Nov 25 15:48:58 crc kubenswrapper[4800]: I1125 15:48:58.538266 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s" event={"ID":"0db87c6c-7306-4a1f-814f-312ff0ff1361","Type":"ContainerDied","Data":"9c23e89d08682ad42319a20ee664c7e05cdd3a5a4a12c1cd1fe8addd8f317e9c"}
Nov 25 15:48:59 crc kubenswrapper[4800]: I1125 15:48:59.959035 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.012961 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-ssh-key\") pod \"0db87c6c-7306-4a1f-814f-312ff0ff1361\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") "
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.013093 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-inventory\") pod \"0db87c6c-7306-4a1f-814f-312ff0ff1361\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") "
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.013358 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pgq5\" (UniqueName: \"kubernetes.io/projected/0db87c6c-7306-4a1f-814f-312ff0ff1361-kube-api-access-7pgq5\") pod \"0db87c6c-7306-4a1f-814f-312ff0ff1361\" (UID: \"0db87c6c-7306-4a1f-814f-312ff0ff1361\") "
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.042286 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0db87c6c-7306-4a1f-814f-312ff0ff1361-kube-api-access-7pgq5" (OuterVolumeSpecName: "kube-api-access-7pgq5") pod "0db87c6c-7306-4a1f-814f-312ff0ff1361" (UID: "0db87c6c-7306-4a1f-814f-312ff0ff1361"). InnerVolumeSpecName "kube-api-access-7pgq5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.046801 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0db87c6c-7306-4a1f-814f-312ff0ff1361" (UID: "0db87c6c-7306-4a1f-814f-312ff0ff1361"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.061727 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-inventory" (OuterVolumeSpecName: "inventory") pod "0db87c6c-7306-4a1f-814f-312ff0ff1361" (UID: "0db87c6c-7306-4a1f-814f-312ff0ff1361"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.116393 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pgq5\" (UniqueName: \"kubernetes.io/projected/0db87c6c-7306-4a1f-814f-312ff0ff1361-kube-api-access-7pgq5\") on node \"crc\" DevicePath \"\""
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.116435 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.116447 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0db87c6c-7306-4a1f-814f-312ff0ff1361-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.563738 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s" event={"ID":"0db87c6c-7306-4a1f-814f-312ff0ff1361","Type":"ContainerDied","Data":"0cac32a8a26d87c23bd10c5197eb058ffa75aa0c9b75dbd41573e25ad43d65a1"}
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.563800 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cac32a8a26d87c23bd10c5197eb058ffa75aa0c9b75dbd41573e25ad43d65a1"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.564367 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.654003 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"]
Nov 25 15:49:00 crc kubenswrapper[4800]: E1125 15:49:00.654439 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db87c6c-7306-4a1f-814f-312ff0ff1361" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.654461 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db87c6c-7306-4a1f-814f-312ff0ff1361" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.654642 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="0db87c6c-7306-4a1f-814f-312ff0ff1361" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.655250 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.659138 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.660496 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.660582 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.660756 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"]
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.663911 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.726385 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.727363 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qwdz\" (UniqueName: \"kubernetes.io/projected/6049b73c-a6c2-490c-8076-86b69a295a0c-kube-api-access-9qwdz\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.727643 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.829956 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.830101 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qwdz\" (UniqueName: \"kubernetes.io/projected/6049b73c-a6c2-490c-8076-86b69a295a0c-kube-api-access-9qwdz\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.830250 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.837255 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.841652 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.850946 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qwdz\" (UniqueName: \"kubernetes.io/projected/6049b73c-a6c2-490c-8076-86b69a295a0c-kube-api-access-9qwdz\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-jf74q\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:00 crc kubenswrapper[4800]: I1125 15:49:00.985622 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:01 crc kubenswrapper[4800]: I1125 15:49:01.526475 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"]
Nov 25 15:49:01 crc kubenswrapper[4800]: I1125 15:49:01.573923 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q" event={"ID":"6049b73c-a6c2-490c-8076-86b69a295a0c","Type":"ContainerStarted","Data":"5d63d845b8b5fa24dbde5b58d8de18bbb29f915387f3c983b6f1e4ff8b01fb56"}
Nov 25 15:49:02 crc kubenswrapper[4800]: I1125 15:49:02.583227 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q" event={"ID":"6049b73c-a6c2-490c-8076-86b69a295a0c","Type":"ContainerStarted","Data":"de8a3d4f7f94bca04b2c21918f2a16a5d2a4c260c18cde61d5209cd1a11e4d86"}
Nov 25 15:49:02 crc kubenswrapper[4800]: I1125 15:49:02.602486 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q" podStartSLOduration=2.098569915 podStartE2EDuration="2.602464658s" podCreationTimestamp="2025-11-25 15:49:00 +0000 UTC" firstStartedPulling="2025-11-25 15:49:01.539028009 +0000 UTC m=+1902.593436501" lastFinishedPulling="2025-11-25 15:49:02.042922772 +0000 UTC m=+1903.097331244" observedRunningTime="2025-11-25 15:49:02.598915541 +0000 UTC m=+1903.653324043" watchObservedRunningTime="2025-11-25 15:49:02.602464658 +0000 UTC m=+1903.656873140"
Nov 25 15:49:07 crc kubenswrapper[4800]: I1125 15:49:07.786749 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:49:07 crc kubenswrapper[4800]: E1125 15:49:07.787976 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:49:18 crc kubenswrapper[4800]: I1125 15:49:18.785125 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:49:18 crc kubenswrapper[4800]: E1125 15:49:18.786168 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:49:19 crc kubenswrapper[4800]: I1125 15:49:19.043607 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-vvflf"]
Nov 25 15:49:19 crc kubenswrapper[4800]: I1125 15:49:19.053611 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-vvflf"]
Nov 25 15:49:19 crc kubenswrapper[4800]: I1125 15:49:19.803850 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a8a8ce2-c939-4626-b487-022750cd3090" path="/var/lib/kubelet/pods/7a8a8ce2-c939-4626-b487-022750cd3090/volumes"
Nov 25 15:49:20 crc kubenswrapper[4800]: I1125 15:49:20.036398 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-7w7c7"]
Nov 25 15:49:20 crc kubenswrapper[4800]: I1125 15:49:20.047794 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-7w7c7"]
Nov 25 15:49:21 crc kubenswrapper[4800]: I1125 15:49:21.801223 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e726809-c215-4d1a-95a3-d0fadede3cca" path="/var/lib/kubelet/pods/8e726809-c215-4d1a-95a3-d0fadede3cca/volumes"
Nov 25 15:49:26 crc kubenswrapper[4800]: I1125 15:49:26.300318 4800 scope.go:117] "RemoveContainer" containerID="cda77c859ed115ed0fac15e1ac8a4a8fbe21eda6d07686adc9effa36f72d5781"
Nov 25 15:49:26 crc kubenswrapper[4800]: I1125 15:49:26.338053 4800 scope.go:117] "RemoveContainer" containerID="e504eafaa9813815df1bd62428bfb4932db7b3b16b2bbcc17fd995833ca13b55"
Nov 25 15:49:26 crc kubenswrapper[4800]: I1125 15:49:26.381700 4800 scope.go:117] "RemoveContainer" containerID="e54f64b475f6328a1c93e9a91914b6b6980df6c41dd5fb71f5da7f04d7d04191"
Nov 25 15:49:26 crc kubenswrapper[4800]: I1125 15:49:26.429767 4800 scope.go:117] "RemoveContainer" containerID="98ef89cabf311e36ec55a79fd9be06b9bdafd54d8aac03c098e384cf70c713ef"
Nov 25 15:49:29 crc kubenswrapper[4800]: I1125 15:49:29.796490 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:49:29 crc kubenswrapper[4800]: E1125 15:49:29.797321 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:49:31 crc kubenswrapper[4800]: I1125 15:49:31.041982 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-rq7kb"]
Nov 25 15:49:31 crc kubenswrapper[4800]: I1125 15:49:31.054986 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-shvrf"]
Nov 25 15:49:31 crc kubenswrapper[4800]: I1125 15:49:31.066129 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-rq7kb"]
Nov 25 15:49:31 crc kubenswrapper[4800]: I1125 15:49:31.074800 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-shvrf"]
Nov 25 15:49:31 crc kubenswrapper[4800]: I1125 15:49:31.802485 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28782c8c-88d7-48d6-bd10-3b64cff49706" path="/var/lib/kubelet/pods/28782c8c-88d7-48d6-bd10-3b64cff49706/volumes"
Nov 25 15:49:31 crc kubenswrapper[4800]: I1125 15:49:31.804864 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b030f9b2-f92c-40d4-b92a-7c99d4af8358" path="/var/lib/kubelet/pods/b030f9b2-f92c-40d4-b92a-7c99d4af8358/volumes"
Nov 25 15:49:33 crc kubenswrapper[4800]: I1125 15:49:33.035356 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-2d48v"]
Nov 25 15:49:33 crc kubenswrapper[4800]: I1125 15:49:33.044450 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-2d48v"]
Nov 25 15:49:33 crc kubenswrapper[4800]: I1125 15:49:33.799753 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15830745-aef8-4482-8885-6a5969795af6" path="/var/lib/kubelet/pods/15830745-aef8-4482-8885-6a5969795af6/volumes"
Nov 25 15:49:43 crc kubenswrapper[4800]: I1125 15:49:43.785406 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:49:43 crc kubenswrapper[4800]: E1125 15:49:43.786307 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:49:54 crc kubenswrapper[4800]: I1125 15:49:54.103636 4800 generic.go:334] "Generic (PLEG): container finished" podID="6049b73c-a6c2-490c-8076-86b69a295a0c" containerID="de8a3d4f7f94bca04b2c21918f2a16a5d2a4c260c18cde61d5209cd1a11e4d86" exitCode=0
Nov 25 15:49:54 crc kubenswrapper[4800]: I1125 15:49:54.103722 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q" event={"ID":"6049b73c-a6c2-490c-8076-86b69a295a0c","Type":"ContainerDied","Data":"de8a3d4f7f94bca04b2c21918f2a16a5d2a4c260c18cde61d5209cd1a11e4d86"}
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.619700 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.755927 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qwdz\" (UniqueName: \"kubernetes.io/projected/6049b73c-a6c2-490c-8076-86b69a295a0c-kube-api-access-9qwdz\") pod \"6049b73c-a6c2-490c-8076-86b69a295a0c\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") "
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.756319 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-ssh-key\") pod \"6049b73c-a6c2-490c-8076-86b69a295a0c\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") "
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.756538 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-inventory\") pod \"6049b73c-a6c2-490c-8076-86b69a295a0c\" (UID: \"6049b73c-a6c2-490c-8076-86b69a295a0c\") "
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.763460 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6049b73c-a6c2-490c-8076-86b69a295a0c-kube-api-access-9qwdz" (OuterVolumeSpecName: "kube-api-access-9qwdz") pod "6049b73c-a6c2-490c-8076-86b69a295a0c" (UID: "6049b73c-a6c2-490c-8076-86b69a295a0c"). InnerVolumeSpecName "kube-api-access-9qwdz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.785703 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:49:55 crc kubenswrapper[4800]: E1125 15:49:55.786184 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.786437 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-inventory" (OuterVolumeSpecName: "inventory") pod "6049b73c-a6c2-490c-8076-86b69a295a0c" (UID: "6049b73c-a6c2-490c-8076-86b69a295a0c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.809838 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6049b73c-a6c2-490c-8076-86b69a295a0c" (UID: "6049b73c-a6c2-490c-8076-86b69a295a0c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.859361 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.859441 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6049b73c-a6c2-490c-8076-86b69a295a0c-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 15:49:55 crc kubenswrapper[4800]: I1125 15:49:55.859457 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qwdz\" (UniqueName: \"kubernetes.io/projected/6049b73c-a6c2-490c-8076-86b69a295a0c-kube-api-access-9qwdz\") on node \"crc\" DevicePath \"\""
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.141345 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q" event={"ID":"6049b73c-a6c2-490c-8076-86b69a295a0c","Type":"ContainerDied","Data":"5d63d845b8b5fa24dbde5b58d8de18bbb29f915387f3c983b6f1e4ff8b01fb56"}
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.141974 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d63d845b8b5fa24dbde5b58d8de18bbb29f915387f3c983b6f1e4ff8b01fb56"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.141434 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.219477 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xtj7g"]
Nov 25 15:49:56 crc kubenswrapper[4800]: E1125 15:49:56.220006 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6049b73c-a6c2-490c-8076-86b69a295a0c" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.220030 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6049b73c-a6c2-490c-8076-86b69a295a0c" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.220294 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6049b73c-a6c2-490c-8076-86b69a295a0c" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.221138 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.224078 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.224388 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.226181 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.230164 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.233027 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xtj7g"]
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.368528 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwxn7\" (UniqueName: \"kubernetes.io/projected/a28afd1d-fdf7-4a3b-9353-071ef1c85944-kube-api-access-dwxn7\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.368618 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.368684 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.470134 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.470289 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.470376 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwxn7\" (UniqueName: \"kubernetes.io/projected/a28afd1d-fdf7-4a3b-9353-071ef1c85944-kube-api-access-dwxn7\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.475582 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.475620 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.493028 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwxn7\" (UniqueName: \"kubernetes.io/projected/a28afd1d-fdf7-4a3b-9353-071ef1c85944-kube-api-access-dwxn7\") pod \"ssh-known-hosts-edpm-deployment-xtj7g\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") " pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:56 crc kubenswrapper[4800]: I1125 15:49:56.540359 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:49:57 crc kubenswrapper[4800]: I1125 15:49:57.104531 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xtj7g"]
Nov 25 15:49:57 crc kubenswrapper[4800]: I1125 15:49:57.153243 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g" event={"ID":"a28afd1d-fdf7-4a3b-9353-071ef1c85944","Type":"ContainerStarted","Data":"f595c9d57ff261805016e13f416b2b83a769235e587caeb874307066bea1f40f"}
Nov 25 15:49:59 crc kubenswrapper[4800]: I1125 15:49:59.178584 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g" event={"ID":"a28afd1d-fdf7-4a3b-9353-071ef1c85944","Type":"ContainerStarted","Data":"5fc8b4c83631711a5dc19584c6c649aa22e4b5fbbf5ce4719933a278b1f63919"}
Nov 25 15:49:59 crc kubenswrapper[4800]: I1125 15:49:59.215962 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g" podStartSLOduration=2.360506909 podStartE2EDuration="3.215928005s" podCreationTimestamp="2025-11-25 15:49:56 +0000 UTC" firstStartedPulling="2025-11-25 15:49:57.10648219 +0000 UTC m=+1958.160890692" lastFinishedPulling="2025-11-25 15:49:57.961903306 +0000 UTC m=+1959.016311788" observedRunningTime="2025-11-25 15:49:59.204523895 +0000 UTC m=+1960.258932377" watchObservedRunningTime="2025-11-25 15:49:59.215928005 +0000 UTC m=+1960.270336487"
Nov 25 15:50:06 crc kubenswrapper[4800]: I1125 15:50:06.246598 4800 generic.go:334] "Generic (PLEG): container finished" podID="a28afd1d-fdf7-4a3b-9353-071ef1c85944" containerID="5fc8b4c83631711a5dc19584c6c649aa22e4b5fbbf5ce4719933a278b1f63919" exitCode=0
Nov 25 15:50:06 crc kubenswrapper[4800]: I1125 15:50:06.247652 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g" event={"ID":"a28afd1d-fdf7-4a3b-9353-071ef1c85944","Type":"ContainerDied","Data":"5fc8b4c83631711a5dc19584c6c649aa22e4b5fbbf5ce4719933a278b1f63919"}
Nov 25 15:50:06 crc kubenswrapper[4800]: I1125 15:50:06.785429 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f"
Nov 25 15:50:06 crc kubenswrapper[4800]: E1125 15:50:06.786183 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.661791 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.802417 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-inventory-0\") pod \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") "
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.802694 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-ssh-key-openstack-edpm-ipam\") pod \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") "
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.802757 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwxn7\" (UniqueName: \"kubernetes.io/projected/a28afd1d-fdf7-4a3b-9353-071ef1c85944-kube-api-access-dwxn7\") pod \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\" (UID: \"a28afd1d-fdf7-4a3b-9353-071ef1c85944\") "
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.826662 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a28afd1d-fdf7-4a3b-9353-071ef1c85944-kube-api-access-dwxn7" (OuterVolumeSpecName: "kube-api-access-dwxn7") pod "a28afd1d-fdf7-4a3b-9353-071ef1c85944" (UID: "a28afd1d-fdf7-4a3b-9353-071ef1c85944"). InnerVolumeSpecName "kube-api-access-dwxn7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.838806 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a28afd1d-fdf7-4a3b-9353-071ef1c85944" (UID: "a28afd1d-fdf7-4a3b-9353-071ef1c85944"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.840010 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "a28afd1d-fdf7-4a3b-9353-071ef1c85944" (UID: "a28afd1d-fdf7-4a3b-9353-071ef1c85944"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.905068 4800 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-inventory-0\") on node \"crc\" DevicePath \"\""
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.905107 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a28afd1d-fdf7-4a3b-9353-071ef1c85944-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 25 15:50:07 crc kubenswrapper[4800]: I1125 15:50:07.905125 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwxn7\" (UniqueName: \"kubernetes.io/projected/a28afd1d-fdf7-4a3b-9353-071ef1c85944-kube-api-access-dwxn7\") on node \"crc\" DevicePath \"\""
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.269131 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g" event={"ID":"a28afd1d-fdf7-4a3b-9353-071ef1c85944","Type":"ContainerDied","Data":"f595c9d57ff261805016e13f416b2b83a769235e587caeb874307066bea1f40f"}
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.269193 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xtj7g"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.269199 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f595c9d57ff261805016e13f416b2b83a769235e587caeb874307066bea1f40f"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.353278 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"]
Nov 25 15:50:08 crc kubenswrapper[4800]: E1125 15:50:08.353710 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28afd1d-fdf7-4a3b-9353-071ef1c85944" containerName="ssh-known-hosts-edpm-deployment"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.353725 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28afd1d-fdf7-4a3b-9353-071ef1c85944" containerName="ssh-known-hosts-edpm-deployment"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.353981 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a28afd1d-fdf7-4a3b-9353-071ef1c85944" containerName="ssh-known-hosts-edpm-deployment"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.354808 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.357689 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.357883 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.357951 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.358191 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.369293 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"]
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.515887 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.515961 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.516082 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngnd8\" (UniqueName: \"kubernetes.io/projected/d98b1555-9d8a-4311-afe5-eefa81cf571e-kube-api-access-ngnd8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.617539 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.617626 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.617769 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngnd8\" (UniqueName: \"kubernetes.io/projected/d98b1555-9d8a-4311-afe5-eefa81cf571e-kube-api-access-ngnd8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.623772 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.623930 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.635400 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngnd8\" (UniqueName: \"kubernetes.io/projected/d98b1555-9d8a-4311-afe5-eefa81cf571e-kube-api-access-ngnd8\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-nd59v\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:08 crc kubenswrapper[4800]: I1125 15:50:08.672908 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.034516 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-487b-account-create-5gkwt"]
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.041469 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-m8npp"]
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.049413 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-m8npp"]
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.057197 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-487b-account-create-5gkwt"]
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.222628 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"]
Nov 25 15:50:09 crc kubenswrapper[4800]: W1125 15:50:09.229078 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd98b1555_9d8a_4311_afe5_eefa81cf571e.slice/crio-2d8ca13152f58ae1073d0c9d48ae750b90e29860e7af8648039e9830c4eac27b WatchSource:0}: Error finding container 2d8ca13152f58ae1073d0c9d48ae750b90e29860e7af8648039e9830c4eac27b: Status 404 returned error can't find the container with id 2d8ca13152f58ae1073d0c9d48ae750b90e29860e7af8648039e9830c4eac27b
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.277896 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v" event={"ID":"d98b1555-9d8a-4311-afe5-eefa81cf571e","Type":"ContainerStarted","Data":"2d8ca13152f58ae1073d0c9d48ae750b90e29860e7af8648039e9830c4eac27b"}
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.795677 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fa7d7c4-8163-4172-9ce7-96aae8e0e627" path="/var/lib/kubelet/pods/1fa7d7c4-8163-4172-9ce7-96aae8e0e627/volumes"
Nov 25 15:50:09 crc kubenswrapper[4800]: I1125 15:50:09.796516 4800
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a909488-3abb-437b-9136-0b5856ff1700" path="/var/lib/kubelet/pods/3a909488-3abb-437b-9136-0b5856ff1700/volumes" Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.035606 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-7efa-account-create-7lrdr"] Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.051132 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-b636-account-create-6dm4h"] Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.060883 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-ms2dt"] Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.068451 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-b636-account-create-6dm4h"] Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.087515 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-hjmpn"] Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.098374 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-7efa-account-create-7lrdr"] Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.105750 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-hjmpn"] Nov 25 15:50:10 crc kubenswrapper[4800]: I1125 15:50:10.115890 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-ms2dt"] Nov 25 15:50:11 crc kubenswrapper[4800]: I1125 15:50:11.297837 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v" event={"ID":"d98b1555-9d8a-4311-afe5-eefa81cf571e","Type":"ContainerStarted","Data":"fa509734f7432e215c40f1672948caa8acdce466be9bcf2676224be9cc9a8912"} Nov 25 15:50:11 crc kubenswrapper[4800]: I1125 15:50:11.330348 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v" podStartSLOduration=2.4940950219999998 podStartE2EDuration="3.330328977s" podCreationTimestamp="2025-11-25 15:50:08 +0000 UTC" firstStartedPulling="2025-11-25 15:50:09.232983731 +0000 UTC m=+1970.287392213" lastFinishedPulling="2025-11-25 15:50:10.069217686 +0000 UTC m=+1971.123626168" observedRunningTime="2025-11-25 15:50:11.31977884 +0000 UTC m=+1972.374187322" watchObservedRunningTime="2025-11-25 15:50:11.330328977 +0000 UTC m=+1972.384737459" Nov 25 15:50:11 crc kubenswrapper[4800]: I1125 15:50:11.803000 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03a21aaa-366d-48c9-bf08-3ea77b154123" path="/var/lib/kubelet/pods/03a21aaa-366d-48c9-bf08-3ea77b154123/volumes" Nov 25 15:50:11 crc kubenswrapper[4800]: I1125 15:50:11.803592 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b6cb1a4-848f-4af5-bd9a-563d2ccd630e" path="/var/lib/kubelet/pods/1b6cb1a4-848f-4af5-bd9a-563d2ccd630e/volumes" Nov 25 15:50:11 crc kubenswrapper[4800]: I1125 15:50:11.804114 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1da2fa33-150f-4450-8504-ac3d17932a8e" path="/var/lib/kubelet/pods/1da2fa33-150f-4450-8504-ac3d17932a8e/volumes" Nov 25 15:50:11 crc kubenswrapper[4800]: I1125 15:50:11.804614 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="840d89d2-2842-4aa9-a9f5-4de3794dcb34" path="/var/lib/kubelet/pods/840d89d2-2842-4aa9-a9f5-4de3794dcb34/volumes" Nov 25 15:50:19 crc kubenswrapper[4800]: I1125 
15:50:19.368556 4800 generic.go:334] "Generic (PLEG): container finished" podID="d98b1555-9d8a-4311-afe5-eefa81cf571e" containerID="fa509734f7432e215c40f1672948caa8acdce466be9bcf2676224be9cc9a8912" exitCode=0 Nov 25 15:50:19 crc kubenswrapper[4800]: I1125 15:50:19.368641 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v" event={"ID":"d98b1555-9d8a-4311-afe5-eefa81cf571e","Type":"ContainerDied","Data":"fa509734f7432e215c40f1672948caa8acdce466be9bcf2676224be9cc9a8912"} Nov 25 15:50:19 crc kubenswrapper[4800]: I1125 15:50:19.785684 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.380788 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"d56ce1859034223339fd1fd96e8443b09843fa53af7fd449295e1c611c5e11ab"} Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.776766 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v" Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.847482 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-ssh-key\") pod \"d98b1555-9d8a-4311-afe5-eefa81cf571e\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.847755 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-inventory\") pod \"d98b1555-9d8a-4311-afe5-eefa81cf571e\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.847789 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngnd8\" (UniqueName: \"kubernetes.io/projected/d98b1555-9d8a-4311-afe5-eefa81cf571e-kube-api-access-ngnd8\") pod \"d98b1555-9d8a-4311-afe5-eefa81cf571e\" (UID: \"d98b1555-9d8a-4311-afe5-eefa81cf571e\") " Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.855935 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d98b1555-9d8a-4311-afe5-eefa81cf571e-kube-api-access-ngnd8" (OuterVolumeSpecName: "kube-api-access-ngnd8") pod "d98b1555-9d8a-4311-afe5-eefa81cf571e" (UID: "d98b1555-9d8a-4311-afe5-eefa81cf571e"). InnerVolumeSpecName "kube-api-access-ngnd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.877050 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d98b1555-9d8a-4311-afe5-eefa81cf571e" (UID: "d98b1555-9d8a-4311-afe5-eefa81cf571e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.878824 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-inventory" (OuterVolumeSpecName: "inventory") pod "d98b1555-9d8a-4311-afe5-eefa81cf571e" (UID: "d98b1555-9d8a-4311-afe5-eefa81cf571e"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.949836 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.949884 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngnd8\" (UniqueName: \"kubernetes.io/projected/d98b1555-9d8a-4311-afe5-eefa81cf571e-kube-api-access-ngnd8\") on node \"crc\" DevicePath \"\"" Nov 25 15:50:20 crc kubenswrapper[4800]: I1125 15:50:20.949895 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d98b1555-9d8a-4311-afe5-eefa81cf571e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.390199 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v" event={"ID":"d98b1555-9d8a-4311-afe5-eefa81cf571e","Type":"ContainerDied","Data":"2d8ca13152f58ae1073d0c9d48ae750b90e29860e7af8648039e9830c4eac27b"} Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.390658 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d8ca13152f58ae1073d0c9d48ae750b90e29860e7af8648039e9830c4eac27b" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.390270 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.463002 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6"] Nov 25 15:50:21 crc kubenswrapper[4800]: E1125 15:50:21.463617 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d98b1555-9d8a-4311-afe5-eefa81cf571e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.463722 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="d98b1555-9d8a-4311-afe5-eefa81cf571e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.463970 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="d98b1555-9d8a-4311-afe5-eefa81cf571e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.464619 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.468898 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.469298 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.469776 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.470306 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.489472 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6"] Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.560398 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.560451 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.560938 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzvp5\" (UniqueName: \"kubernetes.io/projected/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-kube-api-access-bzvp5\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.662118 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzvp5\" (UniqueName: \"kubernetes.io/projected/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-kube-api-access-bzvp5\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.662202 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.662225 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: 
\"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.667574 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.668394 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.681148 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzvp5\" (UniqueName: \"kubernetes.io/projected/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-kube-api-access-bzvp5\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:21 crc kubenswrapper[4800]: I1125 15:50:21.788334 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:22 crc kubenswrapper[4800]: I1125 15:50:22.304081 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6"] Nov 25 15:50:22 crc kubenswrapper[4800]: I1125 15:50:22.398651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" event={"ID":"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf","Type":"ContainerStarted","Data":"e4b50fdb2e35fe119f887a39873e2869287be5940057a4724a98506654d45590"} Nov 25 15:50:23 crc kubenswrapper[4800]: I1125 15:50:23.412179 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" event={"ID":"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf","Type":"ContainerStarted","Data":"5015d1d592596102a55f8cac42b134addbb1ec5a5f09298977d4ab52dc4d0f0a"} Nov 25 15:50:23 crc kubenswrapper[4800]: I1125 15:50:23.437980 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" podStartSLOduration=1.794448477 podStartE2EDuration="2.437835852s" podCreationTimestamp="2025-11-25 15:50:21 +0000 UTC" firstStartedPulling="2025-11-25 15:50:22.305521731 +0000 UTC m=+1983.359930213" lastFinishedPulling="2025-11-25 15:50:22.948909106 +0000 UTC m=+1984.003317588" observedRunningTime="2025-11-25 15:50:23.433256727 +0000 UTC m=+1984.487665209" watchObservedRunningTime="2025-11-25 15:50:23.437835852 +0000 UTC m=+1984.492244334" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.548374 4800 scope.go:117] "RemoveContainer" containerID="734e4da61e6068b40675801ff6b71f2ef520eabc8b67b51099a26a328990b9a2" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.575675 4800 scope.go:117] "RemoveContainer" containerID="0eefb7b7fc795dec6cd9666544ce5a4b72c5db4487024f3254aa19e1f2bcbfb8" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.637588 4800 scope.go:117] "RemoveContainer" 
containerID="ae142a7058f992b3199aa5d54d3f5edc41955c1015e086f975ca298de036731a" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.657325 4800 scope.go:117] "RemoveContainer" containerID="dd8d93fa2221b4532728f6487b5417138c9468699b9ba960e2ede7e905bbaffc" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.705450 4800 scope.go:117] "RemoveContainer" containerID="84bf5dd202b09f7cb71ce72a81067f398fd41c26ccaab69843c093b9c637edd8" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.749264 4800 scope.go:117] "RemoveContainer" containerID="5f3411508cfef89239b8cab3a83cb6b80b08a61992feda649c1a6193b7f1b7ad" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.806266 4800 scope.go:117] "RemoveContainer" containerID="ba816d6eaa1f597a866744f7884185d6679bacca65eab4bd5ab973f10cb49f2e" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.832093 4800 scope.go:117] "RemoveContainer" containerID="97d497703c712ffa8c149578238827a3a982c16d73a1a92f4e4951ef8285773b" Nov 25 15:50:26 crc kubenswrapper[4800]: I1125 15:50:26.855271 4800 scope.go:117] "RemoveContainer" containerID="5d05629313196fe1ac37421302bc0bdd3b37f0f75d2e1e3e54c85319586ff517" Nov 25 15:50:33 crc kubenswrapper[4800]: I1125 15:50:33.506500 4800 generic.go:334] "Generic (PLEG): container finished" podID="97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" containerID="5015d1d592596102a55f8cac42b134addbb1ec5a5f09298977d4ab52dc4d0f0a" exitCode=0 Nov 25 15:50:33 crc kubenswrapper[4800]: I1125 15:50:33.506559 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" event={"ID":"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf","Type":"ContainerDied","Data":"5015d1d592596102a55f8cac42b134addbb1ec5a5f09298977d4ab52dc4d0f0a"} Nov 25 15:50:34 crc kubenswrapper[4800]: I1125 15:50:34.940215 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.043394 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-inventory\") pod \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.044053 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzvp5\" (UniqueName: \"kubernetes.io/projected/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-kube-api-access-bzvp5\") pod \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.044122 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-ssh-key\") pod \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\" (UID: \"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf\") " Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.050314 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-kube-api-access-bzvp5" (OuterVolumeSpecName: "kube-api-access-bzvp5") pod "97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" (UID: "97e491f0-75a3-49ba-a6a3-1a3f1c6522bf"). InnerVolumeSpecName "kube-api-access-bzvp5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.078176 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" (UID: "97e491f0-75a3-49ba-a6a3-1a3f1c6522bf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.088679 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-inventory" (OuterVolumeSpecName: "inventory") pod "97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" (UID: "97e491f0-75a3-49ba-a6a3-1a3f1c6522bf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.146981 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.147020 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzvp5\" (UniqueName: \"kubernetes.io/projected/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-kube-api-access-bzvp5\") on node \"crc\" DevicePath \"\"" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.147049 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.531346 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" event={"ID":"97e491f0-75a3-49ba-a6a3-1a3f1c6522bf","Type":"ContainerDied","Data":"e4b50fdb2e35fe119f887a39873e2869287be5940057a4724a98506654d45590"} Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.531389 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4b50fdb2e35fe119f887a39873e2869287be5940057a4724a98506654d45590" Nov 25 15:50:35 crc kubenswrapper[4800]: I1125 15:50:35.531436 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6" Nov 25 15:50:40 crc kubenswrapper[4800]: I1125 15:50:40.050525 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5nblp"] Nov 25 15:50:40 crc kubenswrapper[4800]: I1125 15:50:40.058384 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5nblp"] Nov 25 15:50:41 crc kubenswrapper[4800]: I1125 15:50:41.799502 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07" path="/var/lib/kubelet/pods/8621b3fe-1d3c-45a9-897b-5cd1eb3f5a07/volumes" Nov 25 15:51:04 crc kubenswrapper[4800]: I1125 15:51:04.049463 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2d7j"] Nov 25 15:51:04 crc kubenswrapper[4800]: I1125 15:51:04.062649 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2d7j"] Nov 25 15:51:05 crc kubenswrapper[4800]: I1125 15:51:05.036515 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-84lxn"] Nov 25 15:51:05 crc kubenswrapper[4800]: I1125 15:51:05.046645 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-84lxn"] Nov 25 15:51:05 crc kubenswrapper[4800]: I1125 15:51:05.797908 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f050231-ed56-42ad-aeb8-4a79ed9ed00f" path="/var/lib/kubelet/pods/3f050231-ed56-42ad-aeb8-4a79ed9ed00f/volumes" Nov 25 15:51:05 crc kubenswrapper[4800]: I1125 15:51:05.799057 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1268195-54b6-4a47-bcb7-eb573bb91209" path="/var/lib/kubelet/pods/a1268195-54b6-4a47-bcb7-eb573bb91209/volumes" Nov 25 15:51:27 crc kubenswrapper[4800]: I1125 15:51:27.064742 4800 scope.go:117] "RemoveContainer" containerID="8cc7a09fd59fda2d84fff87b627a178d74a7daa78e88cec08de27cf6c01eaeb6" Nov 25 15:51:27 crc kubenswrapper[4800]: I1125 15:51:27.126012 4800 scope.go:117] "RemoveContainer" containerID="5e340500223746d502aa5fe0ab3187adbb7f252d3127a9b39dac6998f5009185" Nov 25 15:51:27 crc kubenswrapper[4800]: I1125 15:51:27.181051 4800 scope.go:117] "RemoveContainer" containerID="65fdf81506c1f3b0f9184d604c33756baf4d866f3d20d76d05c3e1ea701c8c41" Nov 25 15:51:49 crc kubenswrapper[4800]: I1125 15:51:49.067224 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-nntxf"] Nov 25 15:51:49 crc kubenswrapper[4800]: I1125 15:51:49.082742 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-nntxf"] Nov 25 15:51:49 crc kubenswrapper[4800]: I1125 15:51:49.810787 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33df2ae0-fba8-410b-8d25-4e8951046509" path="/var/lib/kubelet/pods/33df2ae0-fba8-410b-8d25-4e8951046509/volumes" Nov 25 15:52:27 crc kubenswrapper[4800]: I1125 15:52:27.298284 4800 scope.go:117] "RemoveContainer" containerID="97abb9e624feaeaff7994cc5c638718d8e68b4500f6add6435d62c6a932bdcdb" Nov 25 15:52:42 crc kubenswrapper[4800]: I1125 15:52:42.640595 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:52:42 crc kubenswrapper[4800]: I1125 
15:52:42.641799 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:53:12 crc kubenswrapper[4800]: I1125 15:53:12.640452 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:53:12 crc kubenswrapper[4800]: I1125 15:53:12.641327 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.054093 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sfmfs"] Nov 25 15:53:18 crc kubenswrapper[4800]: E1125 15:53:18.055305 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.055323 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.055570 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.057339 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.069983 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sfmfs"] Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.177771 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-utilities\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.178222 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9m4k\" (UniqueName: \"kubernetes.io/projected/eb5a1450-a108-4de5-82a3-b4e75fdd1099-kube-api-access-q9m4k\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.178498 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-catalog-content\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.281466 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9m4k\" (UniqueName: \"kubernetes.io/projected/eb5a1450-a108-4de5-82a3-b4e75fdd1099-kube-api-access-q9m4k\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.281567 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-catalog-content\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.281618 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-utilities\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.282207 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-utilities\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.282634 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-catalog-content\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.321529 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-q9m4k\" (UniqueName: \"kubernetes.io/projected/eb5a1450-a108-4de5-82a3-b4e75fdd1099-kube-api-access-q9m4k\") pod \"community-operators-sfmfs\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.382400 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:18 crc kubenswrapper[4800]: I1125 15:53:18.947730 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sfmfs"] Nov 25 15:53:19 crc kubenswrapper[4800]: I1125 15:53:19.336857 4800 generic.go:334] "Generic (PLEG): container finished" podID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerID="32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0" exitCode=0 Nov 25 15:53:19 crc kubenswrapper[4800]: I1125 15:53:19.336920 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sfmfs" event={"ID":"eb5a1450-a108-4de5-82a3-b4e75fdd1099","Type":"ContainerDied","Data":"32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0"} Nov 25 15:53:19 crc kubenswrapper[4800]: I1125 15:53:19.336964 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sfmfs" event={"ID":"eb5a1450-a108-4de5-82a3-b4e75fdd1099","Type":"ContainerStarted","Data":"e6c50f5e9e9bc537886ae30480b9544ef3d39b37249fc73bab21c2966c4b4f39"} Nov 25 15:53:19 crc kubenswrapper[4800]: I1125 15:53:19.339230 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 15:53:21 crc kubenswrapper[4800]: I1125 15:53:21.358281 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sfmfs" event={"ID":"eb5a1450-a108-4de5-82a3-b4e75fdd1099","Type":"ContainerStarted","Data":"a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08"} Nov 25 15:53:24 crc kubenswrapper[4800]: I1125 15:53:24.397909 4800 generic.go:334] "Generic (PLEG): container finished" podID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerID="a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08" exitCode=0 Nov 25 15:53:24 crc kubenswrapper[4800]: I1125 15:53:24.397969 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sfmfs" event={"ID":"eb5a1450-a108-4de5-82a3-b4e75fdd1099","Type":"ContainerDied","Data":"a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08"} Nov 25 15:53:26 crc kubenswrapper[4800]: I1125 15:53:26.429013 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sfmfs" event={"ID":"eb5a1450-a108-4de5-82a3-b4e75fdd1099","Type":"ContainerStarted","Data":"8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7"} Nov 25 15:53:26 crc kubenswrapper[4800]: I1125 15:53:26.457492 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sfmfs" podStartSLOduration=2.617552196 podStartE2EDuration="8.457468s" podCreationTimestamp="2025-11-25 15:53:18 +0000 UTC" firstStartedPulling="2025-11-25 15:53:19.33899757 +0000 UTC m=+2160.393406052" lastFinishedPulling="2025-11-25 15:53:25.178913374 +0000 UTC m=+2166.233321856" observedRunningTime="2025-11-25 15:53:26.449975556 +0000 UTC m=+2167.504384058" watchObservedRunningTime="2025-11-25 
15:53:26.457468 +0000 UTC m=+2167.511876492" Nov 25 15:53:28 crc kubenswrapper[4800]: I1125 15:53:28.382638 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:28 crc kubenswrapper[4800]: I1125 15:53:28.383028 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:28 crc kubenswrapper[4800]: I1125 15:53:28.445779 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:38 crc kubenswrapper[4800]: I1125 15:53:38.462956 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:38 crc kubenswrapper[4800]: I1125 15:53:38.530132 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sfmfs"] Nov 25 15:53:38 crc kubenswrapper[4800]: I1125 15:53:38.550971 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sfmfs" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="registry-server" containerID="cri-o://8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7" gracePeriod=2 Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.038983 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.087397 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9m4k\" (UniqueName: \"kubernetes.io/projected/eb5a1450-a108-4de5-82a3-b4e75fdd1099-kube-api-access-q9m4k\") pod \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.087761 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-utilities\") pod \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.087948 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-catalog-content\") pod \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\" (UID: \"eb5a1450-a108-4de5-82a3-b4e75fdd1099\") " Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.088552 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-utilities" (OuterVolumeSpecName: "utilities") pod "eb5a1450-a108-4de5-82a3-b4e75fdd1099" (UID: "eb5a1450-a108-4de5-82a3-b4e75fdd1099"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.096069 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb5a1450-a108-4de5-82a3-b4e75fdd1099-kube-api-access-q9m4k" (OuterVolumeSpecName: "kube-api-access-q9m4k") pod "eb5a1450-a108-4de5-82a3-b4e75fdd1099" (UID: "eb5a1450-a108-4de5-82a3-b4e75fdd1099"). InnerVolumeSpecName "kube-api-access-q9m4k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.164259 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb5a1450-a108-4de5-82a3-b4e75fdd1099" (UID: "eb5a1450-a108-4de5-82a3-b4e75fdd1099"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.191007 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.191064 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb5a1450-a108-4de5-82a3-b4e75fdd1099-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.191078 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9m4k\" (UniqueName: \"kubernetes.io/projected/eb5a1450-a108-4de5-82a3-b4e75fdd1099-kube-api-access-q9m4k\") on node \"crc\" DevicePath \"\"" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.561617 4800 generic.go:334] "Generic (PLEG): container finished" podID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerID="8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7" exitCode=0 Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.561709 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sfmfs" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.561695 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sfmfs" event={"ID":"eb5a1450-a108-4de5-82a3-b4e75fdd1099","Type":"ContainerDied","Data":"8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7"} Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.561784 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sfmfs" event={"ID":"eb5a1450-a108-4de5-82a3-b4e75fdd1099","Type":"ContainerDied","Data":"e6c50f5e9e9bc537886ae30480b9544ef3d39b37249fc73bab21c2966c4b4f39"} Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.561810 4800 scope.go:117] "RemoveContainer" containerID="8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.592568 4800 scope.go:117] "RemoveContainer" containerID="a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.595580 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sfmfs"] Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.608466 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sfmfs"] Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.617695 4800 scope.go:117] "RemoveContainer" containerID="32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.655940 4800 scope.go:117] "RemoveContainer" containerID="8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7" Nov 25 15:53:39 crc kubenswrapper[4800]: E1125 15:53:39.656491 4800 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7\": container with ID starting with 8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7 not found: ID does not exist" containerID="8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.656528 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7"} err="failed to get container status \"8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7\": rpc error: code = NotFound desc = could not find container \"8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7\": container with ID starting with 8222421b03c5e16f634e7df3c2036a80f31591608b16e83be4965b24a76364c7 not found: ID does not exist" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.656556 4800 scope.go:117] "RemoveContainer" containerID="a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08" Nov 25 15:53:39 crc kubenswrapper[4800]: E1125 15:53:39.656821 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08\": container with ID starting with a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08 not found: ID does not exist" containerID="a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.656943 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08"} err="failed to get container status \"a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08\": rpc error: code = NotFound desc = could not find container \"a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08\": container with ID starting with a55c4e9f86ebc3a409bef4e7d722583496ef45b6a697266b8756ee886085aa08 not found: ID does not exist" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.656964 4800 scope.go:117] "RemoveContainer" containerID="32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0" Nov 25 15:53:39 crc kubenswrapper[4800]: E1125 15:53:39.657255 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0\": container with ID starting with 32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0 not found: ID does not exist" containerID="32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.657278 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0"} err="failed to get container status \"32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0\": rpc error: code = NotFound desc = could not find container \"32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0\": container with ID starting with 32fe453524973af9248b0c43cac5f4ec3f2791cea3948e35a98eeaf0c325d1f0 not found: ID does not exist" Nov 25 15:53:39 crc kubenswrapper[4800]: I1125 15:53:39.798240 4800 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" path="/var/lib/kubelet/pods/eb5a1450-a108-4de5-82a3-b4e75fdd1099/volumes" Nov 25 15:53:42 crc kubenswrapper[4800]: I1125 15:53:42.640421 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:53:42 crc kubenswrapper[4800]: I1125 15:53:42.640869 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:53:42 crc kubenswrapper[4800]: I1125 15:53:42.640944 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:53:42 crc kubenswrapper[4800]: I1125 15:53:42.642007 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d56ce1859034223339fd1fd96e8443b09843fa53af7fd449295e1c611c5e11ab"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:53:42 crc kubenswrapper[4800]: I1125 15:53:42.642110 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://d56ce1859034223339fd1fd96e8443b09843fa53af7fd449295e1c611c5e11ab" gracePeriod=600 Nov 25 15:53:43 crc kubenswrapper[4800]: I1125 15:53:43.604605 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="d56ce1859034223339fd1fd96e8443b09843fa53af7fd449295e1c611c5e11ab" exitCode=0 Nov 25 15:53:43 crc kubenswrapper[4800]: I1125 15:53:43.604701 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"d56ce1859034223339fd1fd96e8443b09843fa53af7fd449295e1c611c5e11ab"} Nov 25 15:53:43 crc kubenswrapper[4800]: I1125 15:53:43.605046 4800 scope.go:117] "RemoveContainer" containerID="f69c13bbc156bfef22cbdb08a010a989f672b03cbc41558ba6c7a1801a06a75f" Nov 25 15:53:44 crc kubenswrapper[4800]: I1125 15:53:44.618923 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724"} Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.534986 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r94hw"] Nov 25 15:53:55 crc kubenswrapper[4800]: E1125 15:53:55.536380 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="extract-content" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.536401 4800 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="extract-content" Nov 25 15:53:55 crc kubenswrapper[4800]: E1125 15:53:55.536420 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="registry-server" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.536426 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="registry-server" Nov 25 15:53:55 crc kubenswrapper[4800]: E1125 15:53:55.536440 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="extract-utilities" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.536449 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="extract-utilities" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.536648 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb5a1450-a108-4de5-82a3-b4e75fdd1099" containerName="registry-server" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.538421 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.557216 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r94hw"] Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.622183 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.622234 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkdh9\" (UniqueName: \"kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.622709 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-utilities\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.724743 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.725394 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkdh9\" (UniqueName: \"kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.725480 4800 
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.538421 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.557216 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r94hw"]
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.622183 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.622234 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkdh9\" (UniqueName: \"kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.622709 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-utilities\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.724743 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.725394 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkdh9\" (UniqueName: \"kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.725480 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.725509 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-utilities\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.726125 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-utilities\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:53:55 crc kubenswrapper[4800]: I1125 15:53:55.748097 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkdh9\" (UniqueName: \"kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9\") pod \"redhat-operators-r94hw\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") " pod="openshift-marketplace/redhat-operators-r94hw"
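
Volume setup above runs in the reconciler's two phases: VerifyControllerAttachedVolume first confirms each declared volume is available to the node, then MountVolume.SetUp materializes it (for emptyDir and projected volumes there is nothing to attach, so SetUp amounts to preparing the directory and writing the projected content). A condensed sketch of that loop, with hypothetical types standing in for the real operationExecutor:

    package main

    import "fmt"

    // volume stands in for one entry in the desired state of world.
    type volume struct{ name, plugin string }

    // reconcile mirrors the two phases above: verify every volume first,
    // then mount each one into the pod's directory. Both phases are
    // stand-in prints here, not real attach/mount logic.
    func reconcile(pod string, vols []volume) {
        for _, v := range vols {
            fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod=%q\n",
                v.name, pod)
        }
        for _, v := range vols {
            // emptyDir and projected volumes skip attach entirely;
            // SetUp just prepares the directory contents.
            fmt.Printf("MountVolume.SetUp succeeded for volume %q (plugin %s)\n",
                v.name, v.plugin)
        }
    }

    func main() {
        reconcile("openshift-marketplace/redhat-operators-r94hw", []volume{
            {"catalog-content", "kubernetes.io/empty-dir"},
            {"utilities", "kubernetes.io/empty-dir"},
            {"kube-api-access-gkdh9", "kubernetes.io/projected"},
        })
    }
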
event={"ID":"0e007e5c-ed28-4a0a-853b-0b9cd661e75e","Type":"ContainerDied","Data":"260a8bf867ddaa328fc390e8b91dd542b5a876a7d0d6fff056b71a9b99945022"} Nov 25 15:54:05 crc kubenswrapper[4800]: I1125 15:54:05.922184 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r94hw" event={"ID":"0e007e5c-ed28-4a0a-853b-0b9cd661e75e","Type":"ContainerStarted","Data":"063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102"} Nov 25 15:54:05 crc kubenswrapper[4800]: I1125 15:54:05.954173 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r94hw" podStartSLOduration=3.134793946 podStartE2EDuration="10.954141313s" podCreationTimestamp="2025-11-25 15:53:55 +0000 UTC" firstStartedPulling="2025-11-25 15:53:57.805397464 +0000 UTC m=+2198.859805946" lastFinishedPulling="2025-11-25 15:54:05.62474481 +0000 UTC m=+2206.679153313" observedRunningTime="2025-11-25 15:54:05.944640234 +0000 UTC m=+2206.999048746" watchObservedRunningTime="2025-11-25 15:54:05.954141313 +0000 UTC m=+2207.008549795" Nov 25 15:54:15 crc kubenswrapper[4800]: I1125 15:54:15.867859 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:54:15 crc kubenswrapper[4800]: I1125 15:54:15.868660 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:54:15 crc kubenswrapper[4800]: I1125 15:54:15.920536 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:54:16 crc kubenswrapper[4800]: I1125 15:54:16.090335 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:54:16 crc kubenswrapper[4800]: I1125 15:54:16.161498 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r94hw"] Nov 25 15:54:18 crc kubenswrapper[4800]: I1125 15:54:18.039235 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r94hw" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="registry-server" containerID="cri-o://063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102" gracePeriod=2 Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.055981 4800 generic.go:334] "Generic (PLEG): container finished" podID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerID="063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102" exitCode=0 Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.056173 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r94hw" event={"ID":"0e007e5c-ed28-4a0a-853b-0b9cd661e75e","Type":"ContainerDied","Data":"063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102"} Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.056377 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r94hw" event={"ID":"0e007e5c-ed28-4a0a-853b-0b9cd661e75e","Type":"ContainerDied","Data":"c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159"} Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.056402 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 
Nov 25 15:54:15 crc kubenswrapper[4800]: I1125 15:54:15.867859 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:54:15 crc kubenswrapper[4800]: I1125 15:54:15.868660 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:54:15 crc kubenswrapper[4800]: I1125 15:54:15.920536 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:54:16 crc kubenswrapper[4800]: I1125 15:54:16.090335 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:54:16 crc kubenswrapper[4800]: I1125 15:54:16.161498 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r94hw"]
Nov 25 15:54:18 crc kubenswrapper[4800]: I1125 15:54:18.039235 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r94hw" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="registry-server" containerID="cri-o://063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102" gracePeriod=2
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.055981 4800 generic.go:334] "Generic (PLEG): container finished" podID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerID="063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102" exitCode=0
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.056173 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r94hw" event={"ID":"0e007e5c-ed28-4a0a-853b-0b9cd661e75e","Type":"ContainerDied","Data":"063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102"}
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.056377 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r94hw" event={"ID":"0e007e5c-ed28-4a0a-853b-0b9cd661e75e","Type":"ContainerDied","Data":"c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159"}
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.056402 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159"
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.133485 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r94hw"
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.217503 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content\") pod \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") "
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.217645 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkdh9\" (UniqueName: \"kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9\") pod \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") "
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.217923 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-utilities\") pod \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\" (UID: \"0e007e5c-ed28-4a0a-853b-0b9cd661e75e\") "
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.219542 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-utilities" (OuterVolumeSpecName: "utilities") pod "0e007e5c-ed28-4a0a-853b-0b9cd661e75e" (UID: "0e007e5c-ed28-4a0a-853b-0b9cd661e75e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.226396 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9" (OuterVolumeSpecName: "kube-api-access-gkdh9") pod "0e007e5c-ed28-4a0a-853b-0b9cd661e75e" (UID: "0e007e5c-ed28-4a0a-853b-0b9cd661e75e"). InnerVolumeSpecName "kube-api-access-gkdh9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.310218 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e007e5c-ed28-4a0a-853b-0b9cd661e75e" (UID: "0e007e5c-ed28-4a0a-853b-0b9cd661e75e"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.321135 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.321192 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.321213 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkdh9\" (UniqueName: \"kubernetes.io/projected/0e007e5c-ed28-4a0a-853b-0b9cd661e75e-kube-api-access-gkdh9\") on node \"crc\" DevicePath \"\"" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.592938 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nn9gz"] Nov 25 15:54:19 crc kubenswrapper[4800]: E1125 15:54:19.593464 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="extract-utilities" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.593481 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="extract-utilities" Nov 25 15:54:19 crc kubenswrapper[4800]: E1125 15:54:19.593508 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="registry-server" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.593514 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="registry-server" Nov 25 15:54:19 crc kubenswrapper[4800]: E1125 15:54:19.593563 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="extract-content" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.593570 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="extract-content" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.593806 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" containerName="registry-server" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.595568 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.607011 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nn9gz"] Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.636895 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-utilities\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.637113 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s29t8\" (UniqueName: \"kubernetes.io/projected/b53dd104-c53d-4f39-b6b5-1edae7049f89-kube-api-access-s29t8\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.637171 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-catalog-content\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.738850 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-utilities\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.738974 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s29t8\" (UniqueName: \"kubernetes.io/projected/b53dd104-c53d-4f39-b6b5-1edae7049f89-kube-api-access-s29t8\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.739020 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-catalog-content\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.739527 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-catalog-content\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.739751 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-utilities\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:19 crc kubenswrapper[4800]: I1125 15:54:19.762524 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s29t8\" (UniqueName: \"kubernetes.io/projected/b53dd104-c53d-4f39-b6b5-1edae7049f89-kube-api-access-s29t8\") pod \"certified-operators-nn9gz\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:20 crc kubenswrapper[4800]: I1125 15:54:20.005029 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:20 crc kubenswrapper[4800]: I1125 15:54:20.063829 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r94hw" Nov 25 15:54:20 crc kubenswrapper[4800]: I1125 15:54:20.162417 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r94hw"] Nov 25 15:54:20 crc kubenswrapper[4800]: I1125 15:54:20.176003 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r94hw"] Nov 25 15:54:20 crc kubenswrapper[4800]: I1125 15:54:20.543588 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nn9gz"] Nov 25 15:54:21 crc kubenswrapper[4800]: I1125 15:54:21.074419 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerStarted","Data":"c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f"} Nov 25 15:54:21 crc kubenswrapper[4800]: I1125 15:54:21.074490 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerStarted","Data":"985249dec62fb9a471f45573650039dee402d79c5fa2182a1f861096eeb02d70"} Nov 25 15:54:21 crc kubenswrapper[4800]: I1125 15:54:21.799304 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e007e5c-ed28-4a0a-853b-0b9cd661e75e" path="/var/lib/kubelet/pods/0e007e5c-ed28-4a0a-853b-0b9cd661e75e/volumes" Nov 25 15:54:22 crc kubenswrapper[4800]: I1125 15:54:22.085614 4800 generic.go:334] "Generic (PLEG): container finished" podID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerID="c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f" exitCode=0 Nov 25 15:54:22 crc kubenswrapper[4800]: I1125 15:54:22.085663 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerDied","Data":"c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f"} Nov 25 15:54:23 crc kubenswrapper[4800]: I1125 15:54:23.102061 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerStarted","Data":"45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c"} Nov 25 15:54:24 crc kubenswrapper[4800]: I1125 15:54:24.116654 4800 generic.go:334] "Generic (PLEG): container finished" podID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerID="45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c" exitCode=0 Nov 25 15:54:24 crc kubenswrapper[4800]: I1125 15:54:24.116721 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" 
event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerDied","Data":"45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c"} Nov 25 15:54:25 crc kubenswrapper[4800]: I1125 15:54:25.131817 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerStarted","Data":"94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff"} Nov 25 15:54:25 crc kubenswrapper[4800]: I1125 15:54:25.164724 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nn9gz" podStartSLOduration=3.679681801 podStartE2EDuration="6.164698022s" podCreationTimestamp="2025-11-25 15:54:19 +0000 UTC" firstStartedPulling="2025-11-25 15:54:22.087612377 +0000 UTC m=+2223.142020859" lastFinishedPulling="2025-11-25 15:54:24.572628598 +0000 UTC m=+2225.627037080" observedRunningTime="2025-11-25 15:54:25.155333587 +0000 UTC m=+2226.209742079" watchObservedRunningTime="2025-11-25 15:54:25.164698022 +0000 UTC m=+2226.219106504" Nov 25 15:54:27 crc kubenswrapper[4800]: E1125 15:54:27.695934 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice/crio-c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159\": RecentStats: unable to find data in memory cache]" Nov 25 15:54:30 crc kubenswrapper[4800]: I1125 15:54:30.006184 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:30 crc kubenswrapper[4800]: I1125 15:54:30.006568 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:30 crc kubenswrapper[4800]: I1125 15:54:30.057377 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:30 crc kubenswrapper[4800]: I1125 15:54:30.231071 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:30 crc kubenswrapper[4800]: I1125 15:54:30.303811 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nn9gz"] Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.203469 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nn9gz" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="registry-server" containerID="cri-o://94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff" gracePeriod=2 Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.669090 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.797362 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s29t8\" (UniqueName: \"kubernetes.io/projected/b53dd104-c53d-4f39-b6b5-1edae7049f89-kube-api-access-s29t8\") pod \"b53dd104-c53d-4f39-b6b5-1edae7049f89\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.797938 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-catalog-content\") pod \"b53dd104-c53d-4f39-b6b5-1edae7049f89\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.798231 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-utilities\") pod \"b53dd104-c53d-4f39-b6b5-1edae7049f89\" (UID: \"b53dd104-c53d-4f39-b6b5-1edae7049f89\") " Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.799131 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-utilities" (OuterVolumeSpecName: "utilities") pod "b53dd104-c53d-4f39-b6b5-1edae7049f89" (UID: "b53dd104-c53d-4f39-b6b5-1edae7049f89"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.804970 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b53dd104-c53d-4f39-b6b5-1edae7049f89-kube-api-access-s29t8" (OuterVolumeSpecName: "kube-api-access-s29t8") pod "b53dd104-c53d-4f39-b6b5-1edae7049f89" (UID: "b53dd104-c53d-4f39-b6b5-1edae7049f89"). InnerVolumeSpecName "kube-api-access-s29t8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.857229 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b53dd104-c53d-4f39-b6b5-1edae7049f89" (UID: "b53dd104-c53d-4f39-b6b5-1edae7049f89"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.900668 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.900719 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b53dd104-c53d-4f39-b6b5-1edae7049f89-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:54:32 crc kubenswrapper[4800]: I1125 15:54:32.900734 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s29t8\" (UniqueName: \"kubernetes.io/projected/b53dd104-c53d-4f39-b6b5-1edae7049f89-kube-api-access-s29t8\") on node \"crc\" DevicePath \"\"" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.217032 4800 generic.go:334] "Generic (PLEG): container finished" podID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerID="94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff" exitCode=0 Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.217101 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerDied","Data":"94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff"} Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.217157 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nn9gz" event={"ID":"b53dd104-c53d-4f39-b6b5-1edae7049f89","Type":"ContainerDied","Data":"985249dec62fb9a471f45573650039dee402d79c5fa2182a1f861096eeb02d70"} Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.217191 4800 scope.go:117] "RemoveContainer" containerID="94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.217386 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nn9gz" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.246358 4800 scope.go:117] "RemoveContainer" containerID="45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.286179 4800 scope.go:117] "RemoveContainer" containerID="c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.291522 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nn9gz"] Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.303985 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nn9gz"] Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.349171 4800 scope.go:117] "RemoveContainer" containerID="94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff" Nov 25 15:54:33 crc kubenswrapper[4800]: E1125 15:54:33.350288 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff\": container with ID starting with 94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff not found: ID does not exist" containerID="94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.350325 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff"} err="failed to get container status \"94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff\": rpc error: code = NotFound desc = could not find container \"94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff\": container with ID starting with 94dd7f1875cd2c1091aefa03d8d031515375aa87b0f7af9536261470adc2a0ff not found: ID does not exist" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.350359 4800 scope.go:117] "RemoveContainer" containerID="45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c" Nov 25 15:54:33 crc kubenswrapper[4800]: E1125 15:54:33.351044 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c\": container with ID starting with 45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c not found: ID does not exist" containerID="45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.351186 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c"} err="failed to get container status \"45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c\": rpc error: code = NotFound desc = could not find container \"45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c\": container with ID starting with 45a6103f1f17afee04cdddbbd25c0d529d3580bff4fa6a32bbf0f2c424a4ef3c not found: ID does not exist" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.351289 4800 scope.go:117] "RemoveContainer" containerID="c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f" Nov 25 15:54:33 crc kubenswrapper[4800]: E1125 15:54:33.351872 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f\": container with ID starting with c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f not found: ID does not exist" containerID="c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.351917 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f"} err="failed to get container status \"c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f\": rpc error: code = NotFound desc = could not find container \"c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f\": container with ID starting with c9a608b360c3a80659d84ceb776e4c5d8e1c70b7f852509a3fe17562c536aa4f not found: ID does not exist" Nov 25 15:54:33 crc kubenswrapper[4800]: I1125 15:54:33.798653 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" path="/var/lib/kubelet/pods/b53dd104-c53d-4f39-b6b5-1edae7049f89/volumes" Nov 25 15:54:37 crc kubenswrapper[4800]: E1125 15:54:37.980666 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice/crio-c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice\": RecentStats: unable to find data in memory cache]" Nov 25 15:54:48 crc kubenswrapper[4800]: E1125 15:54:48.237316 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice/crio-c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice\": RecentStats: unable to find data in memory cache]" Nov 25 15:54:58 crc kubenswrapper[4800]: E1125 15:54:58.473284 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice/crio-c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159\": RecentStats: unable to find data in memory cache]" Nov 25 15:55:02 crc kubenswrapper[4800]: E1125 15:55:02.874444 4800 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.09s" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.811891 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-88ztj"] Nov 25 15:55:07 crc kubenswrapper[4800]: E1125 15:55:07.813191 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="extract-content" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.813215 4800 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="extract-content" Nov 25 15:55:07 crc kubenswrapper[4800]: E1125 15:55:07.813243 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="registry-server" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.813256 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="registry-server" Nov 25 15:55:07 crc kubenswrapper[4800]: E1125 15:55:07.813287 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="extract-utilities" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.813301 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="extract-utilities" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.813638 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b53dd104-c53d-4f39-b6b5-1edae7049f89" containerName="registry-server" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.816558 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-88ztj"] Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.816717 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.950653 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fds5h\" (UniqueName: \"kubernetes.io/projected/0db6c444-b65d-4275-9b51-b0f48b7acf80-kube-api-access-fds5h\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.950986 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-catalog-content\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:07 crc kubenswrapper[4800]: I1125 15:55:07.951139 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-utilities\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 15:55:08.052450 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-utilities\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 15:55:08.052599 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fds5h\" (UniqueName: \"kubernetes.io/projected/0db6c444-b65d-4275-9b51-b0f48b7acf80-kube-api-access-fds5h\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 
15:55:08.052638 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-catalog-content\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 15:55:08.053031 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-utilities\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 15:55:08.053196 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-catalog-content\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 15:55:08.077098 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fds5h\" (UniqueName: \"kubernetes.io/projected/0db6c444-b65d-4275-9b51-b0f48b7acf80-kube-api-access-fds5h\") pod \"redhat-marketplace-88ztj\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 15:55:08.142464 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:08 crc kubenswrapper[4800]: I1125 15:55:08.668981 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-88ztj"] Nov 25 15:55:08 crc kubenswrapper[4800]: E1125 15:55:08.722550 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice/crio-c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159\": RecentStats: unable to find data in memory cache]" Nov 25 15:55:09 crc kubenswrapper[4800]: I1125 15:55:09.591885 4800 generic.go:334] "Generic (PLEG): container finished" podID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerID="4f804455c5ab039dfdad9e63411fb537e6f46b40f333c7e4e4594e13dc1154d1" exitCode=0 Nov 25 15:55:09 crc kubenswrapper[4800]: I1125 15:55:09.592026 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88ztj" event={"ID":"0db6c444-b65d-4275-9b51-b0f48b7acf80","Type":"ContainerDied","Data":"4f804455c5ab039dfdad9e63411fb537e6f46b40f333c7e4e4594e13dc1154d1"} Nov 25 15:55:09 crc kubenswrapper[4800]: I1125 15:55:09.592626 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88ztj" event={"ID":"0db6c444-b65d-4275-9b51-b0f48b7acf80","Type":"ContainerStarted","Data":"d2881adf7a711de18ff83672df25d58297033ecdabacfb25e4660c1774e1a2b8"} Nov 25 15:55:13 crc kubenswrapper[4800]: I1125 15:55:13.654296 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88ztj" 
event={"ID":"0db6c444-b65d-4275-9b51-b0f48b7acf80","Type":"ContainerStarted","Data":"d14596dafa1c721239dec0e134e60da93b47242f83a92628aa767b6f5c252051"} Nov 25 15:55:14 crc kubenswrapper[4800]: I1125 15:55:14.666666 4800 generic.go:334] "Generic (PLEG): container finished" podID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerID="d14596dafa1c721239dec0e134e60da93b47242f83a92628aa767b6f5c252051" exitCode=0 Nov 25 15:55:14 crc kubenswrapper[4800]: I1125 15:55:14.666787 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88ztj" event={"ID":"0db6c444-b65d-4275-9b51-b0f48b7acf80","Type":"ContainerDied","Data":"d14596dafa1c721239dec0e134e60da93b47242f83a92628aa767b6f5c252051"} Nov 25 15:55:17 crc kubenswrapper[4800]: I1125 15:55:17.698123 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88ztj" event={"ID":"0db6c444-b65d-4275-9b51-b0f48b7acf80","Type":"ContainerStarted","Data":"dbb1e52feb39dd9c0e095e81bea8895518370ede67e70f6e8a3e573a026b66c0"} Nov 25 15:55:18 crc kubenswrapper[4800]: I1125 15:55:18.736145 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-88ztj" podStartSLOduration=4.293450788 podStartE2EDuration="11.736117523s" podCreationTimestamp="2025-11-25 15:55:07 +0000 UTC" firstStartedPulling="2025-11-25 15:55:09.598289875 +0000 UTC m=+2270.652698397" lastFinishedPulling="2025-11-25 15:55:17.04095665 +0000 UTC m=+2278.095365132" observedRunningTime="2025-11-25 15:55:18.732089712 +0000 UTC m=+2279.786498204" watchObservedRunningTime="2025-11-25 15:55:18.736117523 +0000 UTC m=+2279.790526005" Nov 25 15:55:18 crc kubenswrapper[4800]: E1125 15:55:18.990383 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e007e5c_ed28_4a0a_853b_0b9cd661e75e.slice/crio-c947a32036138bf7d8039c0b8bd4c3c3bb9bebaf65e8d00652e35f30b87cf159\": RecentStats: unable to find data in memory cache]" Nov 25 15:55:28 crc kubenswrapper[4800]: I1125 15:55:28.143093 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:28 crc kubenswrapper[4800]: I1125 15:55:28.144828 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:28 crc kubenswrapper[4800]: I1125 15:55:28.194596 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:28 crc kubenswrapper[4800]: I1125 15:55:28.876573 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:29 crc kubenswrapper[4800]: I1125 15:55:29.891865 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-88ztj"] Nov 25 15:55:30 crc kubenswrapper[4800]: I1125 15:55:30.894286 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-88ztj" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="registry-server" containerID="cri-o://dbb1e52feb39dd9c0e095e81bea8895518370ede67e70f6e8a3e573a026b66c0" gracePeriod=2 
Nov 25 15:55:31 crc kubenswrapper[4800]: I1125 15:55:31.909282 4800 generic.go:334] "Generic (PLEG): container finished" podID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerID="dbb1e52feb39dd9c0e095e81bea8895518370ede67e70f6e8a3e573a026b66c0" exitCode=0 Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.069530 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88ztj" event={"ID":"0db6c444-b65d-4275-9b51-b0f48b7acf80","Type":"ContainerDied","Data":"dbb1e52feb39dd9c0e095e81bea8895518370ede67e70f6e8a3e573a026b66c0"} Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.421826 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.531124 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-utilities\") pod \"0db6c444-b65d-4275-9b51-b0f48b7acf80\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.531319 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-catalog-content\") pod \"0db6c444-b65d-4275-9b51-b0f48b7acf80\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.531467 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fds5h\" (UniqueName: \"kubernetes.io/projected/0db6c444-b65d-4275-9b51-b0f48b7acf80-kube-api-access-fds5h\") pod \"0db6c444-b65d-4275-9b51-b0f48b7acf80\" (UID: \"0db6c444-b65d-4275-9b51-b0f48b7acf80\") " Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.532323 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-utilities" (OuterVolumeSpecName: "utilities") pod "0db6c444-b65d-4275-9b51-b0f48b7acf80" (UID: "0db6c444-b65d-4275-9b51-b0f48b7acf80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.541271 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0db6c444-b65d-4275-9b51-b0f48b7acf80-kube-api-access-fds5h" (OuterVolumeSpecName: "kube-api-access-fds5h") pod "0db6c444-b65d-4275-9b51-b0f48b7acf80" (UID: "0db6c444-b65d-4275-9b51-b0f48b7acf80"). InnerVolumeSpecName "kube-api-access-fds5h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.634105 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.634155 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fds5h\" (UniqueName: \"kubernetes.io/projected/0db6c444-b65d-4275-9b51-b0f48b7acf80-kube-api-access-fds5h\") on node \"crc\" DevicePath \"\"" Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.920000 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88ztj" event={"ID":"0db6c444-b65d-4275-9b51-b0f48b7acf80","Type":"ContainerDied","Data":"d2881adf7a711de18ff83672df25d58297033ecdabacfb25e4660c1774e1a2b8"} Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.920479 4800 scope.go:117] "RemoveContainer" containerID="dbb1e52feb39dd9c0e095e81bea8895518370ede67e70f6e8a3e573a026b66c0" Nov 25 15:55:32 crc kubenswrapper[4800]: I1125 15:55:32.920073 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88ztj" Nov 25 15:55:33 crc kubenswrapper[4800]: I1125 15:55:33.213631 4800 scope.go:117] "RemoveContainer" containerID="d14596dafa1c721239dec0e134e60da93b47242f83a92628aa767b6f5c252051" Nov 25 15:55:33 crc kubenswrapper[4800]: I1125 15:55:33.244733 4800 scope.go:117] "RemoveContainer" containerID="4f804455c5ab039dfdad9e63411fb537e6f46b40f333c7e4e4594e13dc1154d1" Nov 25 15:55:33 crc kubenswrapper[4800]: I1125 15:55:33.381457 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0db6c444-b65d-4275-9b51-b0f48b7acf80" (UID: "0db6c444-b65d-4275-9b51-b0f48b7acf80"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 15:55:33 crc kubenswrapper[4800]: I1125 15:55:33.455400 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db6c444-b65d-4275-9b51-b0f48b7acf80-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 15:55:33 crc kubenswrapper[4800]: I1125 15:55:33.582411 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-88ztj"] Nov 25 15:55:33 crc kubenswrapper[4800]: I1125 15:55:33.599097 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-88ztj"] Nov 25 15:55:33 crc kubenswrapper[4800]: I1125 15:55:33.797179 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" path="/var/lib/kubelet/pods/0db6c444-b65d-4275-9b51-b0f48b7acf80/volumes" Nov 25 15:56:12 crc kubenswrapper[4800]: I1125 15:56:12.640689 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:56:12 crc kubenswrapper[4800]: I1125 15:56:12.641300 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:56:42 crc kubenswrapper[4800]: I1125 15:56:42.641105 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:56:42 crc kubenswrapper[4800]: I1125 15:56:42.641733 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.841363 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"] Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.853093 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"] Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.863808 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np"] Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.873071 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xtj7g"] Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.882133 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s"] Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.890785 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-nd59v"] Nov 25 15:56:49 crc 
kubenswrapper[4800]: I1125 15:56:49.903724 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.913280 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.934868 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-jf74q"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.950800 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.964330 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xf2np"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.974034 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.981257 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xtj7g"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.989598 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2h4vc"]
Nov 25 15:56:49 crc kubenswrapper[4800]: I1125 15:56:49.997502 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mc24s"]
Nov 25 15:56:50 crc kubenswrapper[4800]: I1125 15:56:50.006771 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-nfd5s"]
Nov 25 15:56:50 crc kubenswrapper[4800]: I1125 15:56:50.013369 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6pv2q"]
Nov 25 15:56:50 crc kubenswrapper[4800]: I1125 15:56:50.019718 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6"]
Nov 25 15:56:50 crc kubenswrapper[4800]: I1125 15:56:50.025622 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-bt745"]
Nov 25 15:56:50 crc kubenswrapper[4800]: I1125 15:56:50.031495 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-dtmq6"]
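
Once the API objects are gone, a housekeeping pass deletes each terminated pod's /var/lib/kubelet/pods/<UID>/volumes directory, producing the run of "Cleaned up orphaned pod volumes dir" entries that follows. A simplified sketch of that scan; the real kubelet also checks that nothing under the directory is still mounted before removing it:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // cleanupOrphanedPodDirs removes the volumes directory of any pod
    // directory under root whose UID is not in the active set.
    func cleanupOrphanedPodDirs(root string, active map[string]bool) error {
        entries, err := os.ReadDir(root)
        if err != nil {
            return err
        }
        for _, e := range entries {
            if !e.IsDir() || active[e.Name()] {
                continue
            }
            volumes := filepath.Join(root, e.Name(), "volumes")
            if err := os.RemoveAll(volumes); err != nil {
                return err
            }
            fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n",
                e.Name(), volumes)
        }
        return nil
    }

    func main() {
        _ = cleanupOrphanedPodDirs("/var/lib/kubelet/pods", map[string]bool{})
    }
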
path="/var/lib/kubelet/pods/6049b73c-a6c2-490c-8076-86b69a295a0c/volumes" Nov 25 15:56:51 crc kubenswrapper[4800]: I1125 15:56:51.801530 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="713eb615-1098-49ed-9749-50ba1822b159" path="/var/lib/kubelet/pods/713eb615-1098-49ed-9749-50ba1822b159/volumes" Nov 25 15:56:51 crc kubenswrapper[4800]: I1125 15:56:51.802317 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97e491f0-75a3-49ba-a6a3-1a3f1c6522bf" path="/var/lib/kubelet/pods/97e491f0-75a3-49ba-a6a3-1a3f1c6522bf/volumes" Nov 25 15:56:51 crc kubenswrapper[4800]: I1125 15:56:51.802921 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a28afd1d-fdf7-4a3b-9353-071ef1c85944" path="/var/lib/kubelet/pods/a28afd1d-fdf7-4a3b-9353-071ef1c85944/volumes" Nov 25 15:56:51 crc kubenswrapper[4800]: I1125 15:56:51.803975 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8ed30c2-c85a-41d7-be2b-fa9dfe81547b" path="/var/lib/kubelet/pods/b8ed30c2-c85a-41d7-be2b-fa9dfe81547b/volumes" Nov 25 15:56:51 crc kubenswrapper[4800]: I1125 15:56:51.804545 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d98b1555-9d8a-4311-afe5-eefa81cf571e" path="/var/lib/kubelet/pods/d98b1555-9d8a-4311-afe5-eefa81cf571e/volumes" Nov 25 15:56:51 crc kubenswrapper[4800]: I1125 15:56:51.805133 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e48dfcf5-6a27-44b9-aca0-56a9411ae73e" path="/var/lib/kubelet/pods/e48dfcf5-6a27-44b9-aca0-56a9411ae73e/volumes" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.946093 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv"] Nov 25 15:56:55 crc kubenswrapper[4800]: E1125 15:56:55.947384 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="extract-utilities" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.947403 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="extract-utilities" Nov 25 15:56:55 crc kubenswrapper[4800]: E1125 15:56:55.947419 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="extract-content" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.947428 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="extract-content" Nov 25 15:56:55 crc kubenswrapper[4800]: E1125 15:56:55.947443 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="registry-server" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.947455 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="registry-server" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.947688 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="0db6c444-b65d-4275-9b51-b0f48b7acf80" containerName="registry-server" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.948630 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.951521 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.951793 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.952284 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.953680 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.956290 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv"] Nov 25 15:56:55 crc kubenswrapper[4800]: I1125 15:56:55.958202 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.100300 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.100700 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.100750 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwrkt\" (UniqueName: \"kubernetes.io/projected/abeab1aa-d713-443d-b487-9a59f90d161a-kube-api-access-qwrkt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.100913 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.100947 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.203073 4800 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.203122 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.203159 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwrkt\" (UniqueName: \"kubernetes.io/projected/abeab1aa-d713-443d-b487-9a59f90d161a-kube-api-access-qwrkt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.203228 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.203256 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.211442 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.211687 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.211869 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.214386 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.226164 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwrkt\" (UniqueName: \"kubernetes.io/projected/abeab1aa-d713-443d-b487-9a59f90d161a-kube-api-access-qwrkt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.282078 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:56:56 crc kubenswrapper[4800]: I1125 15:56:56.887160 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv"] Nov 25 15:56:57 crc kubenswrapper[4800]: I1125 15:56:57.245583 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" event={"ID":"abeab1aa-d713-443d-b487-9a59f90d161a","Type":"ContainerStarted","Data":"2dcc9cd9ef29e4514b6161a4775bcbe48d1cbe528d022f4b3ba0b940a16de5a4"} Nov 25 15:56:58 crc kubenswrapper[4800]: I1125 15:56:58.258651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" event={"ID":"abeab1aa-d713-443d-b487-9a59f90d161a","Type":"ContainerStarted","Data":"ab54b13f652f5376853219c2d8b830434685bd02d9faf88165fb9b676386df7c"} Nov 25 15:56:58 crc kubenswrapper[4800]: I1125 15:56:58.282988 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" podStartSLOduration=2.735962316 podStartE2EDuration="3.282950901s" podCreationTimestamp="2025-11-25 15:56:55 +0000 UTC" firstStartedPulling="2025-11-25 15:56:56.892590958 +0000 UTC m=+2377.946999460" lastFinishedPulling="2025-11-25 15:56:57.439579563 +0000 UTC m=+2378.493988045" observedRunningTime="2025-11-25 15:56:58.282922069 +0000 UTC m=+2379.337330561" watchObservedRunningTime="2025-11-25 15:56:58.282950901 +0000 UTC m=+2379.337359443" Nov 25 15:57:10 crc kubenswrapper[4800]: I1125 15:57:10.376222 4800 generic.go:334] "Generic (PLEG): container finished" podID="abeab1aa-d713-443d-b487-9a59f90d161a" containerID="ab54b13f652f5376853219c2d8b830434685bd02d9faf88165fb9b676386df7c" exitCode=0 Nov 25 15:57:10 crc kubenswrapper[4800]: I1125 15:57:10.376315 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" event={"ID":"abeab1aa-d713-443d-b487-9a59f90d161a","Type":"ContainerDied","Data":"ab54b13f652f5376853219c2d8b830434685bd02d9faf88165fb9b676386df7c"} Nov 25 15:57:11 crc kubenswrapper[4800]: I1125 15:57:11.987225 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.120413 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-inventory\") pod \"abeab1aa-d713-443d-b487-9a59f90d161a\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.120663 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-repo-setup-combined-ca-bundle\") pod \"abeab1aa-d713-443d-b487-9a59f90d161a\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.120690 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwrkt\" (UniqueName: \"kubernetes.io/projected/abeab1aa-d713-443d-b487-9a59f90d161a-kube-api-access-qwrkt\") pod \"abeab1aa-d713-443d-b487-9a59f90d161a\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.120714 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ssh-key\") pod \"abeab1aa-d713-443d-b487-9a59f90d161a\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.120802 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ceph\") pod \"abeab1aa-d713-443d-b487-9a59f90d161a\" (UID: \"abeab1aa-d713-443d-b487-9a59f90d161a\") " Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.128572 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ceph" (OuterVolumeSpecName: "ceph") pod "abeab1aa-d713-443d-b487-9a59f90d161a" (UID: "abeab1aa-d713-443d-b487-9a59f90d161a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.128588 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "abeab1aa-d713-443d-b487-9a59f90d161a" (UID: "abeab1aa-d713-443d-b487-9a59f90d161a"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.129298 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abeab1aa-d713-443d-b487-9a59f90d161a-kube-api-access-qwrkt" (OuterVolumeSpecName: "kube-api-access-qwrkt") pod "abeab1aa-d713-443d-b487-9a59f90d161a" (UID: "abeab1aa-d713-443d-b487-9a59f90d161a"). InnerVolumeSpecName "kube-api-access-qwrkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.157331 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-inventory" (OuterVolumeSpecName: "inventory") pod "abeab1aa-d713-443d-b487-9a59f90d161a" (UID: "abeab1aa-d713-443d-b487-9a59f90d161a"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.160981 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "abeab1aa-d713-443d-b487-9a59f90d161a" (UID: "abeab1aa-d713-443d-b487-9a59f90d161a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.223386 4800 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.223422 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwrkt\" (UniqueName: \"kubernetes.io/projected/abeab1aa-d713-443d-b487-9a59f90d161a-kube-api-access-qwrkt\") on node \"crc\" DevicePath \"\"" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.223432 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.223445 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.223455 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/abeab1aa-d713-443d-b487-9a59f90d161a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.407478 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" event={"ID":"abeab1aa-d713-443d-b487-9a59f90d161a","Type":"ContainerDied","Data":"2dcc9cd9ef29e4514b6161a4775bcbe48d1cbe528d022f4b3ba0b940a16de5a4"} Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.407528 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2dcc9cd9ef29e4514b6161a4775bcbe48d1cbe528d022f4b3ba0b940a16de5a4" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.407563 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.484970 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m"] Nov 25 15:57:12 crc kubenswrapper[4800]: E1125 15:57:12.485440 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abeab1aa-d713-443d-b487-9a59f90d161a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.485477 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="abeab1aa-d713-443d-b487-9a59f90d161a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.485755 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="abeab1aa-d713-443d-b487-9a59f90d161a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.486483 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.490151 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.490384 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.490441 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.490574 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.495745 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.511424 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m"] Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.530718 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.530818 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.530883 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " 
pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.530948 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf44q\" (UniqueName: \"kubernetes.io/projected/0316ba41-9805-4c20-ace9-757468989756-kube-api-access-gf44q\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.531009 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.633537 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.633734 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.633808 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.633862 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.633925 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf44q\" (UniqueName: \"kubernetes.io/projected/0316ba41-9805-4c20-ace9-757468989756-kube-api-access-gf44q\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.638547 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 
15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.639090 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.639255 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.639609 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.639699 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.639691 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.639774 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.640543 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.640598 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" gracePeriod=600 Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.652040 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf44q\" (UniqueName: \"kubernetes.io/projected/0316ba41-9805-4c20-ace9-757468989756-kube-api-access-gf44q\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:12 crc kubenswrapper[4800]: E1125 15:57:12.788067 4800 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:57:12 crc kubenswrapper[4800]: I1125 15:57:12.808541 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:57:13 crc kubenswrapper[4800]: I1125 15:57:13.396039 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m"] Nov 25 15:57:13 crc kubenswrapper[4800]: I1125 15:57:13.418539 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" event={"ID":"0316ba41-9805-4c20-ace9-757468989756","Type":"ContainerStarted","Data":"94e5cacaba98b11f3342e1d33a3a690e20f3829b3f00757f039b08014f44c826"} Nov 25 15:57:13 crc kubenswrapper[4800]: I1125 15:57:13.421622 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" exitCode=0 Nov 25 15:57:13 crc kubenswrapper[4800]: I1125 15:57:13.421647 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724"} Nov 25 15:57:13 crc kubenswrapper[4800]: I1125 15:57:13.421682 4800 scope.go:117] "RemoveContainer" containerID="d56ce1859034223339fd1fd96e8443b09843fa53af7fd449295e1c611c5e11ab" Nov 25 15:57:13 crc kubenswrapper[4800]: I1125 15:57:13.423582 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:57:13 crc kubenswrapper[4800]: E1125 15:57:13.424203 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:57:16 crc kubenswrapper[4800]: I1125 15:57:16.455901 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" event={"ID":"0316ba41-9805-4c20-ace9-757468989756","Type":"ContainerStarted","Data":"52720fb68ea081e70e84c9a04be72d84f82795dd74e22930723f1d4cdf6de5d0"} Nov 25 15:57:24 crc kubenswrapper[4800]: I1125 15:57:24.785973 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:57:24 crc kubenswrapper[4800]: E1125 15:57:24.786715 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.567320 4800 scope.go:117] "RemoveContainer" containerID="5015d1d592596102a55f8cac42b134addbb1ec5a5f09298977d4ab52dc4d0f0a" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.605156 4800 scope.go:117] "RemoveContainer" containerID="e7b2915863a018baf9090c0f34f8cdd272994f7fd4e107b64a973a1ff14e72fb" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.676989 4800 scope.go:117] "RemoveContainer" containerID="9c23e89d08682ad42319a20ee664c7e05cdd3a5a4a12c1cd1fe8addd8f317e9c" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.709912 4800 scope.go:117] "RemoveContainer" containerID="00e11517fbfb4bb44a4dc83fc317b10420340fd1db8748869728b3b9db636b88" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.761252 4800 scope.go:117] "RemoveContainer" containerID="5fc8b4c83631711a5dc19584c6c649aa22e4b5fbbf5ce4719933a278b1f63919" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.810084 4800 scope.go:117] "RemoveContainer" containerID="779d68bb45d152ea615fd5bd2efe6edbb2732b2fe491bf2a81ce4ecea6dd26e3" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.842910 4800 scope.go:117] "RemoveContainer" containerID="fa509734f7432e215c40f1672948caa8acdce466be9bcf2676224be9cc9a8912" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.877240 4800 scope.go:117] "RemoveContainer" containerID="5eb27ce4a3611659cfd85bece4dcd20e3256ae3071bd869d0137211a0ff3a1e6" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.911791 4800 scope.go:117] "RemoveContainer" containerID="de8a3d4f7f94bca04b2c21918f2a16a5d2a4c260c18cde61d5209cd1a11e4d86" Nov 25 15:57:27 crc kubenswrapper[4800]: I1125 15:57:27.979707 4800 scope.go:117] "RemoveContainer" containerID="e2903086cdacced893f5a7ba71cd01154d02500d46e9bd8e45a54705ea17a547" Nov 25 15:57:35 crc kubenswrapper[4800]: I1125 15:57:35.786485 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:57:35 crc kubenswrapper[4800]: E1125 15:57:35.787658 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:57:50 crc kubenswrapper[4800]: I1125 15:57:50.786620 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:57:50 crc kubenswrapper[4800]: E1125 15:57:50.787371 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:58:04 crc kubenswrapper[4800]: I1125 15:58:04.785223 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:58:04 crc kubenswrapper[4800]: E1125 15:58:04.786256 4800 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:58:19 crc kubenswrapper[4800]: I1125 15:58:19.793751 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:58:19 crc kubenswrapper[4800]: E1125 15:58:19.794960 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:58:32 crc kubenswrapper[4800]: I1125 15:58:32.786204 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:58:32 crc kubenswrapper[4800]: E1125 15:58:32.787128 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:58:43 crc kubenswrapper[4800]: I1125 15:58:43.786071 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:58:43 crc kubenswrapper[4800]: E1125 15:58:43.787280 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:58:58 crc kubenswrapper[4800]: I1125 15:58:58.785548 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:58:58 crc kubenswrapper[4800]: E1125 15:58:58.787423 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:59:04 crc kubenswrapper[4800]: E1125 15:59:04.979440 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0316ba41_9805_4c20_ace9_757468989756.slice/crio-conmon-52720fb68ea081e70e84c9a04be72d84f82795dd74e22930723f1d4cdf6de5d0.scope\": RecentStats: unable to find data in memory cache]" Nov 25 15:59:05 crc kubenswrapper[4800]: I1125 15:59:05.820124 4800 
generic.go:334] "Generic (PLEG): container finished" podID="0316ba41-9805-4c20-ace9-757468989756" containerID="52720fb68ea081e70e84c9a04be72d84f82795dd74e22930723f1d4cdf6de5d0" exitCode=0 Nov 25 15:59:05 crc kubenswrapper[4800]: I1125 15:59:05.820599 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" event={"ID":"0316ba41-9805-4c20-ace9-757468989756","Type":"ContainerDied","Data":"52720fb68ea081e70e84c9a04be72d84f82795dd74e22930723f1d4cdf6de5d0"} Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.337700 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.427209 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ssh-key\") pod \"0316ba41-9805-4c20-ace9-757468989756\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.427566 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf44q\" (UniqueName: \"kubernetes.io/projected/0316ba41-9805-4c20-ace9-757468989756-kube-api-access-gf44q\") pod \"0316ba41-9805-4c20-ace9-757468989756\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.427749 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-inventory\") pod \"0316ba41-9805-4c20-ace9-757468989756\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.428018 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-bootstrap-combined-ca-bundle\") pod \"0316ba41-9805-4c20-ace9-757468989756\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.428232 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ceph\") pod \"0316ba41-9805-4c20-ace9-757468989756\" (UID: \"0316ba41-9805-4c20-ace9-757468989756\") " Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.435038 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "0316ba41-9805-4c20-ace9-757468989756" (UID: "0316ba41-9805-4c20-ace9-757468989756"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.435437 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ceph" (OuterVolumeSpecName: "ceph") pod "0316ba41-9805-4c20-ace9-757468989756" (UID: "0316ba41-9805-4c20-ace9-757468989756"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.435597 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0316ba41-9805-4c20-ace9-757468989756-kube-api-access-gf44q" (OuterVolumeSpecName: "kube-api-access-gf44q") pod "0316ba41-9805-4c20-ace9-757468989756" (UID: "0316ba41-9805-4c20-ace9-757468989756"). InnerVolumeSpecName "kube-api-access-gf44q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.458284 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-inventory" (OuterVolumeSpecName: "inventory") pod "0316ba41-9805-4c20-ace9-757468989756" (UID: "0316ba41-9805-4c20-ace9-757468989756"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.466897 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0316ba41-9805-4c20-ace9-757468989756" (UID: "0316ba41-9805-4c20-ace9-757468989756"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.531879 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.531940 4800 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.531959 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.531970 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0316ba41-9805-4c20-ace9-757468989756-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.531986 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf44q\" (UniqueName: \"kubernetes.io/projected/0316ba41-9805-4c20-ace9-757468989756-kube-api-access-gf44q\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.844166 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" event={"ID":"0316ba41-9805-4c20-ace9-757468989756","Type":"ContainerDied","Data":"94e5cacaba98b11f3342e1d33a3a690e20f3829b3f00757f039b08014f44c826"} Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.844251 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94e5cacaba98b11f3342e1d33a3a690e20f3829b3f00757f039b08014f44c826" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.844206 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.976794 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq"] Nov 25 15:59:07 crc kubenswrapper[4800]: E1125 15:59:07.977335 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0316ba41-9805-4c20-ace9-757468989756" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.977362 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0316ba41-9805-4c20-ace9-757468989756" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.977619 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="0316ba41-9805-4c20-ace9-757468989756" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.978525 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.987172 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.987334 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq"] Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.987525 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.987659 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.994760 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:59:07 crc kubenswrapper[4800]: I1125 15:59:07.994913 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.047648 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.047718 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.047750 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.048179 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2qwx\" (UniqueName: \"kubernetes.io/projected/92286c54-cedd-4519-9b0c-f72e6b79984d-kube-api-access-k2qwx\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.150671 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.150805 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.150856 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.150953 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2qwx\" (UniqueName: \"kubernetes.io/projected/92286c54-cedd-4519-9b0c-f72e6b79984d-kube-api-access-k2qwx\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.155430 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.155983 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.156429 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.174809 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2qwx\" (UniqueName: \"kubernetes.io/projected/92286c54-cedd-4519-9b0c-f72e6b79984d-kube-api-access-k2qwx\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.317271 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:08 crc kubenswrapper[4800]: W1125 15:59:08.899300 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92286c54_cedd_4519_9b0c_f72e6b79984d.slice/crio-bb16b1a8c096d21e34d69f7870286015cb88df6e224c0d1c5620279282b55d16 WatchSource:0}: Error finding container bb16b1a8c096d21e34d69f7870286015cb88df6e224c0d1c5620279282b55d16: Status 404 returned error can't find the container with id bb16b1a8c096d21e34d69f7870286015cb88df6e224c0d1c5620279282b55d16 Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.902187 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 15:59:08 crc kubenswrapper[4800]: I1125 15:59:08.918041 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq"] Nov 25 15:59:09 crc kubenswrapper[4800]: I1125 15:59:09.863727 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" event={"ID":"92286c54-cedd-4519-9b0c-f72e6b79984d","Type":"ContainerStarted","Data":"f7ec33d93c564b2c799f86dad95b53b88d103bacf8aa72a870be6e4fc2584900"} Nov 25 15:59:09 crc kubenswrapper[4800]: I1125 15:59:09.864342 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" event={"ID":"92286c54-cedd-4519-9b0c-f72e6b79984d","Type":"ContainerStarted","Data":"bb16b1a8c096d21e34d69f7870286015cb88df6e224c0d1c5620279282b55d16"} Nov 25 15:59:09 crc kubenswrapper[4800]: I1125 15:59:09.888930 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" podStartSLOduration=2.428004161 podStartE2EDuration="2.888907926s" podCreationTimestamp="2025-11-25 15:59:07 +0000 UTC" firstStartedPulling="2025-11-25 15:59:08.901987613 +0000 UTC m=+2509.956396095" lastFinishedPulling="2025-11-25 15:59:09.362891368 +0000 UTC m=+2510.417299860" observedRunningTime="2025-11-25 15:59:09.878488723 +0000 UTC m=+2510.932897205" watchObservedRunningTime="2025-11-25 15:59:09.888907926 +0000 UTC m=+2510.943316408" Nov 25 15:59:12 crc kubenswrapper[4800]: I1125 15:59:12.785451 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:59:12 crc kubenswrapper[4800]: E1125 15:59:12.786246 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:59:27 crc kubenswrapper[4800]: I1125 15:59:27.786129 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:59:27 crc kubenswrapper[4800]: E1125 15:59:27.787104 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:59:36 crc kubenswrapper[4800]: I1125 15:59:36.135112 4800 generic.go:334] "Generic (PLEG): container finished" podID="92286c54-cedd-4519-9b0c-f72e6b79984d" containerID="f7ec33d93c564b2c799f86dad95b53b88d103bacf8aa72a870be6e4fc2584900" exitCode=0 Nov 25 15:59:36 crc kubenswrapper[4800]: I1125 15:59:36.135245 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" event={"ID":"92286c54-cedd-4519-9b0c-f72e6b79984d","Type":"ContainerDied","Data":"f7ec33d93c564b2c799f86dad95b53b88d103bacf8aa72a870be6e4fc2584900"} Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.561216 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.754151 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ssh-key\") pod \"92286c54-cedd-4519-9b0c-f72e6b79984d\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.754326 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ceph\") pod \"92286c54-cedd-4519-9b0c-f72e6b79984d\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.754367 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-inventory\") pod \"92286c54-cedd-4519-9b0c-f72e6b79984d\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.754569 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2qwx\" (UniqueName: \"kubernetes.io/projected/92286c54-cedd-4519-9b0c-f72e6b79984d-kube-api-access-k2qwx\") pod \"92286c54-cedd-4519-9b0c-f72e6b79984d\" (UID: \"92286c54-cedd-4519-9b0c-f72e6b79984d\") " Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.761019 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ceph" (OuterVolumeSpecName: "ceph") pod "92286c54-cedd-4519-9b0c-f72e6b79984d" (UID: "92286c54-cedd-4519-9b0c-f72e6b79984d"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.761117 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92286c54-cedd-4519-9b0c-f72e6b79984d-kube-api-access-k2qwx" (OuterVolumeSpecName: "kube-api-access-k2qwx") pod "92286c54-cedd-4519-9b0c-f72e6b79984d" (UID: "92286c54-cedd-4519-9b0c-f72e6b79984d"). InnerVolumeSpecName "kube-api-access-k2qwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.786974 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-inventory" (OuterVolumeSpecName: "inventory") pod "92286c54-cedd-4519-9b0c-f72e6b79984d" (UID: "92286c54-cedd-4519-9b0c-f72e6b79984d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.789291 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "92286c54-cedd-4519-9b0c-f72e6b79984d" (UID: "92286c54-cedd-4519-9b0c-f72e6b79984d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.857035 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.857064 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2qwx\" (UniqueName: \"kubernetes.io/projected/92286c54-cedd-4519-9b0c-f72e6b79984d-kube-api-access-k2qwx\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.857075 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:37 crc kubenswrapper[4800]: I1125 15:59:37.857159 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/92286c54-cedd-4519-9b0c-f72e6b79984d-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.161651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" event={"ID":"92286c54-cedd-4519-9b0c-f72e6b79984d","Type":"ContainerDied","Data":"bb16b1a8c096d21e34d69f7870286015cb88df6e224c0d1c5620279282b55d16"} Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.162141 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb16b1a8c096d21e34d69f7870286015cb88df6e224c0d1c5620279282b55d16" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.161719 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.245102 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd"] Nov 25 15:59:38 crc kubenswrapper[4800]: E1125 15:59:38.245582 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92286c54-cedd-4519-9b0c-f72e6b79984d" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.245604 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="92286c54-cedd-4519-9b0c-f72e6b79984d" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.245831 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="92286c54-cedd-4519-9b0c-f72e6b79984d" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.246714 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.249195 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.252225 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.252272 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.252433 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.252485 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.266484 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd"] Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.267272 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.267335 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.267457 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffmbh\" (UniqueName: \"kubernetes.io/projected/4c1aa378-a5fb-4c41-b773-e77118db1abe-kube-api-access-ffmbh\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: 
\"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.267509 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.369654 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffmbh\" (UniqueName: \"kubernetes.io/projected/4c1aa378-a5fb-4c41-b773-e77118db1abe-kube-api-access-ffmbh\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.369755 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.369820 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.369894 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.374760 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.375372 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.377895 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " 
pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.392683 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffmbh\" (UniqueName: \"kubernetes.io/projected/4c1aa378-a5fb-4c41-b773-e77118db1abe-kube-api-access-ffmbh\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:38 crc kubenswrapper[4800]: I1125 15:59:38.573265 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:39 crc kubenswrapper[4800]: I1125 15:59:39.182387 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd"] Nov 25 15:59:40 crc kubenswrapper[4800]: I1125 15:59:40.190720 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" event={"ID":"4c1aa378-a5fb-4c41-b773-e77118db1abe","Type":"ContainerStarted","Data":"b6d4a4826c26b7c59dc80e58e5db5194a57fe30fec38affd1f5ff74b90cbdecc"} Nov 25 15:59:40 crc kubenswrapper[4800]: I1125 15:59:40.191605 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" event={"ID":"4c1aa378-a5fb-4c41-b773-e77118db1abe","Type":"ContainerStarted","Data":"0ae6712cdaab7505b82e5b67e6eb08d1b72d7201358479f6c8e14dc786b1da74"} Nov 25 15:59:40 crc kubenswrapper[4800]: I1125 15:59:40.214763 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" podStartSLOduration=1.7376528530000002 podStartE2EDuration="2.214742059s" podCreationTimestamp="2025-11-25 15:59:38 +0000 UTC" firstStartedPulling="2025-11-25 15:59:39.198421024 +0000 UTC m=+2540.252829506" lastFinishedPulling="2025-11-25 15:59:39.67551023 +0000 UTC m=+2540.729918712" observedRunningTime="2025-11-25 15:59:40.208765966 +0000 UTC m=+2541.263174448" watchObservedRunningTime="2025-11-25 15:59:40.214742059 +0000 UTC m=+2541.269150541" Nov 25 15:59:42 crc kubenswrapper[4800]: I1125 15:59:42.785903 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:59:42 crc kubenswrapper[4800]: E1125 15:59:42.786543 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 15:59:45 crc kubenswrapper[4800]: I1125 15:59:45.243286 4800 generic.go:334] "Generic (PLEG): container finished" podID="4c1aa378-a5fb-4c41-b773-e77118db1abe" containerID="b6d4a4826c26b7c59dc80e58e5db5194a57fe30fec38affd1f5ff74b90cbdecc" exitCode=0 Nov 25 15:59:45 crc kubenswrapper[4800]: I1125 15:59:45.243356 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" 
event={"ID":"4c1aa378-a5fb-4c41-b773-e77118db1abe","Type":"ContainerDied","Data":"b6d4a4826c26b7c59dc80e58e5db5194a57fe30fec38affd1f5ff74b90cbdecc"} Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.777835 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.907781 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ceph\") pod \"4c1aa378-a5fb-4c41-b773-e77118db1abe\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.907902 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-inventory\") pod \"4c1aa378-a5fb-4c41-b773-e77118db1abe\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.907962 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffmbh\" (UniqueName: \"kubernetes.io/projected/4c1aa378-a5fb-4c41-b773-e77118db1abe-kube-api-access-ffmbh\") pod \"4c1aa378-a5fb-4c41-b773-e77118db1abe\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.908004 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ssh-key\") pod \"4c1aa378-a5fb-4c41-b773-e77118db1abe\" (UID: \"4c1aa378-a5fb-4c41-b773-e77118db1abe\") " Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.916478 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c1aa378-a5fb-4c41-b773-e77118db1abe-kube-api-access-ffmbh" (OuterVolumeSpecName: "kube-api-access-ffmbh") pod "4c1aa378-a5fb-4c41-b773-e77118db1abe" (UID: "4c1aa378-a5fb-4c41-b773-e77118db1abe"). InnerVolumeSpecName "kube-api-access-ffmbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.917462 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ceph" (OuterVolumeSpecName: "ceph") pod "4c1aa378-a5fb-4c41-b773-e77118db1abe" (UID: "4c1aa378-a5fb-4c41-b773-e77118db1abe"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.942726 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-inventory" (OuterVolumeSpecName: "inventory") pod "4c1aa378-a5fb-4c41-b773-e77118db1abe" (UID: "4c1aa378-a5fb-4c41-b773-e77118db1abe"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:46 crc kubenswrapper[4800]: I1125 15:59:46.943763 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4c1aa378-a5fb-4c41-b773-e77118db1abe" (UID: "4c1aa378-a5fb-4c41-b773-e77118db1abe"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.011663 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.011708 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffmbh\" (UniqueName: \"kubernetes.io/projected/4c1aa378-a5fb-4c41-b773-e77118db1abe-kube-api-access-ffmbh\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.011723 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.011733 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4c1aa378-a5fb-4c41-b773-e77118db1abe-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.269429 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" event={"ID":"4c1aa378-a5fb-4c41-b773-e77118db1abe","Type":"ContainerDied","Data":"0ae6712cdaab7505b82e5b67e6eb08d1b72d7201358479f6c8e14dc786b1da74"} Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.269508 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ae6712cdaab7505b82e5b67e6eb08d1b72d7201358479f6c8e14dc786b1da74" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.269559 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.357513 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6"] Nov 25 15:59:47 crc kubenswrapper[4800]: E1125 15:59:47.358017 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1aa378-a5fb-4c41-b773-e77118db1abe" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.358034 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1aa378-a5fb-4c41-b773-e77118db1abe" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.358237 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c1aa378-a5fb-4c41-b773-e77118db1abe" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.359152 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.361876 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.362258 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.362313 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.362323 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.362442 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.368688 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6"] Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.422757 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnpt2\" (UniqueName: \"kubernetes.io/projected/43707459-1078-4789-9cb5-b40d41b41d97-kube-api-access-cnpt2\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.422898 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.423051 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.423103 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.526288 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.526403 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.526632 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnpt2\" (UniqueName: \"kubernetes.io/projected/43707459-1078-4789-9cb5-b40d41b41d97-kube-api-access-cnpt2\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.526741 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.532681 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.533075 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.533763 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.562108 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnpt2\" (UniqueName: \"kubernetes.io/projected/43707459-1078-4789-9cb5-b40d41b41d97-kube-api-access-cnpt2\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-4dch6\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:47 crc kubenswrapper[4800]: I1125 15:59:47.677820 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 15:59:48 crc kubenswrapper[4800]: I1125 15:59:48.318464 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6"] Nov 25 15:59:49 crc kubenswrapper[4800]: I1125 15:59:49.292566 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" event={"ID":"43707459-1078-4789-9cb5-b40d41b41d97","Type":"ContainerStarted","Data":"e3c17650c36015bddeb43adbcb6a3b726f2e4f67ac70941b99aae79322a105dc"} Nov 25 15:59:49 crc kubenswrapper[4800]: I1125 15:59:49.293089 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" event={"ID":"43707459-1078-4789-9cb5-b40d41b41d97","Type":"ContainerStarted","Data":"ad00f15b0a2bd37f4c32ee86452e6138709942b6579563abdcd070282ed2a392"} Nov 25 15:59:49 crc kubenswrapper[4800]: I1125 15:59:49.316540 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" podStartSLOduration=1.882139078 podStartE2EDuration="2.31650718s" podCreationTimestamp="2025-11-25 15:59:47 +0000 UTC" firstStartedPulling="2025-11-25 15:59:48.320118898 +0000 UTC m=+2549.374527380" lastFinishedPulling="2025-11-25 15:59:48.75448696 +0000 UTC m=+2549.808895482" observedRunningTime="2025-11-25 15:59:49.312564342 +0000 UTC m=+2550.366972854" watchObservedRunningTime="2025-11-25 15:59:49.31650718 +0000 UTC m=+2550.370915672" Nov 25 15:59:55 crc kubenswrapper[4800]: I1125 15:59:55.785558 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 15:59:55 crc kubenswrapper[4800]: E1125 15:59:55.786478 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.157716 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr"] Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.160241 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.163604 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.164378 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.171049 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr"] Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.229354 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gj96x\" (UniqueName: \"kubernetes.io/projected/1c852c73-a4a0-470b-a46d-98a1d7408f72-kube-api-access-gj96x\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.229719 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c852c73-a4a0-470b-a46d-98a1d7408f72-config-volume\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.230034 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c852c73-a4a0-470b-a46d-98a1d7408f72-secret-volume\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.333180 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gj96x\" (UniqueName: \"kubernetes.io/projected/1c852c73-a4a0-470b-a46d-98a1d7408f72-kube-api-access-gj96x\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.333538 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c852c73-a4a0-470b-a46d-98a1d7408f72-config-volume\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.333636 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c852c73-a4a0-470b-a46d-98a1d7408f72-secret-volume\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.334742 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c852c73-a4a0-470b-a46d-98a1d7408f72-config-volume\") pod 
\"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.348681 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c852c73-a4a0-470b-a46d-98a1d7408f72-secret-volume\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.358086 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gj96x\" (UniqueName: \"kubernetes.io/projected/1c852c73-a4a0-470b-a46d-98a1d7408f72-kube-api-access-gj96x\") pod \"collect-profiles-29401440-sp8hr\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.498801 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:00 crc kubenswrapper[4800]: I1125 16:00:00.974992 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr"] Nov 25 16:00:00 crc kubenswrapper[4800]: W1125 16:00:00.981786 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c852c73_a4a0_470b_a46d_98a1d7408f72.slice/crio-2a8a313691441862fb65c3a4cde904dc51df436a90f7dc693828b86319a92770 WatchSource:0}: Error finding container 2a8a313691441862fb65c3a4cde904dc51df436a90f7dc693828b86319a92770: Status 404 returned error can't find the container with id 2a8a313691441862fb65c3a4cde904dc51df436a90f7dc693828b86319a92770 Nov 25 16:00:01 crc kubenswrapper[4800]: I1125 16:00:01.409223 4800 generic.go:334] "Generic (PLEG): container finished" podID="1c852c73-a4a0-470b-a46d-98a1d7408f72" containerID="7f1ca107fa3933a5b37cfdb5e8881fa2f417ec2192865101a38d03cf3eb1a671" exitCode=0 Nov 25 16:00:01 crc kubenswrapper[4800]: I1125 16:00:01.409288 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" event={"ID":"1c852c73-a4a0-470b-a46d-98a1d7408f72","Type":"ContainerDied","Data":"7f1ca107fa3933a5b37cfdb5e8881fa2f417ec2192865101a38d03cf3eb1a671"} Nov 25 16:00:01 crc kubenswrapper[4800]: I1125 16:00:01.409678 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" event={"ID":"1c852c73-a4a0-470b-a46d-98a1d7408f72","Type":"ContainerStarted","Data":"2a8a313691441862fb65c3a4cde904dc51df436a90f7dc693828b86319a92770"} Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.784930 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.889626 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c852c73-a4a0-470b-a46d-98a1d7408f72-config-volume\") pod \"1c852c73-a4a0-470b-a46d-98a1d7408f72\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.889934 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gj96x\" (UniqueName: \"kubernetes.io/projected/1c852c73-a4a0-470b-a46d-98a1d7408f72-kube-api-access-gj96x\") pod \"1c852c73-a4a0-470b-a46d-98a1d7408f72\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.889969 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c852c73-a4a0-470b-a46d-98a1d7408f72-secret-volume\") pod \"1c852c73-a4a0-470b-a46d-98a1d7408f72\" (UID: \"1c852c73-a4a0-470b-a46d-98a1d7408f72\") " Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.891010 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c852c73-a4a0-470b-a46d-98a1d7408f72-config-volume" (OuterVolumeSpecName: "config-volume") pod "1c852c73-a4a0-470b-a46d-98a1d7408f72" (UID: "1c852c73-a4a0-470b-a46d-98a1d7408f72"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.900938 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c852c73-a4a0-470b-a46d-98a1d7408f72-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1c852c73-a4a0-470b-a46d-98a1d7408f72" (UID: "1c852c73-a4a0-470b-a46d-98a1d7408f72"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.909041 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c852c73-a4a0-470b-a46d-98a1d7408f72-kube-api-access-gj96x" (OuterVolumeSpecName: "kube-api-access-gj96x") pod "1c852c73-a4a0-470b-a46d-98a1d7408f72" (UID: "1c852c73-a4a0-470b-a46d-98a1d7408f72"). InnerVolumeSpecName "kube-api-access-gj96x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.993663 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c852c73-a4a0-470b-a46d-98a1d7408f72-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.993710 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gj96x\" (UniqueName: \"kubernetes.io/projected/1c852c73-a4a0-470b-a46d-98a1d7408f72-kube-api-access-gj96x\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:02 crc kubenswrapper[4800]: I1125 16:00:02.993729 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c852c73-a4a0-470b-a46d-98a1d7408f72-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:03 crc kubenswrapper[4800]: I1125 16:00:03.434807 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" event={"ID":"1c852c73-a4a0-470b-a46d-98a1d7408f72","Type":"ContainerDied","Data":"2a8a313691441862fb65c3a4cde904dc51df436a90f7dc693828b86319a92770"} Nov 25 16:00:03 crc kubenswrapper[4800]: I1125 16:00:03.434919 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a8a313691441862fb65c3a4cde904dc51df436a90f7dc693828b86319a92770" Nov 25 16:00:03 crc kubenswrapper[4800]: I1125 16:00:03.434968 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr" Nov 25 16:00:03 crc kubenswrapper[4800]: I1125 16:00:03.876223 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx"] Nov 25 16:00:03 crc kubenswrapper[4800]: I1125 16:00:03.886400 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401395-ls8lx"] Nov 25 16:00:05 crc kubenswrapper[4800]: I1125 16:00:05.798012 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40" path="/var/lib/kubelet/pods/1fe3fca3-1127-4a40-bf8e-bb6f2cd7aa40/volumes" Nov 25 16:00:08 crc kubenswrapper[4800]: I1125 16:00:08.785971 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:00:08 crc kubenswrapper[4800]: E1125 16:00:08.787016 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:00:23 crc kubenswrapper[4800]: I1125 16:00:23.786065 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:00:23 crc kubenswrapper[4800]: E1125 16:00:23.786959 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:00:28 crc kubenswrapper[4800]: I1125 16:00:28.217380 4800 scope.go:117] "RemoveContainer" containerID="567bc7e9dd17b9fe42ca66bb67b0e7eafe166700df7803005c8af2d172ff8c53" Nov 25 16:00:28 crc kubenswrapper[4800]: I1125 16:00:28.245441 4800 scope.go:117] "RemoveContainer" containerID="260a8bf867ddaa328fc390e8b91dd542b5a876a7d0d6fff056b71a9b99945022" Nov 25 16:00:28 crc kubenswrapper[4800]: I1125 16:00:28.306206 4800 scope.go:117] "RemoveContainer" containerID="eb8ce379ff209a0acdbb0a8617d0786ea508d29d36a27ee7ac9c8a1e4baac375" Nov 25 16:00:28 crc kubenswrapper[4800]: I1125 16:00:28.364811 4800 scope.go:117] "RemoveContainer" containerID="063a7f1239af9141e895d86f99290d7c7cf8f332da90471cefbf532ae5965102" Nov 25 16:00:29 crc kubenswrapper[4800]: I1125 16:00:29.706312 4800 generic.go:334] "Generic (PLEG): container finished" podID="43707459-1078-4789-9cb5-b40d41b41d97" containerID="e3c17650c36015bddeb43adbcb6a3b726f2e4f67ac70941b99aae79322a105dc" exitCode=0 Nov 25 16:00:29 crc kubenswrapper[4800]: I1125 16:00:29.706398 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" event={"ID":"43707459-1078-4789-9cb5-b40d41b41d97","Type":"ContainerDied","Data":"e3c17650c36015bddeb43adbcb6a3b726f2e4f67ac70941b99aae79322a105dc"} Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.123178 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.309325 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ceph\") pod \"43707459-1078-4789-9cb5-b40d41b41d97\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.310450 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnpt2\" (UniqueName: \"kubernetes.io/projected/43707459-1078-4789-9cb5-b40d41b41d97-kube-api-access-cnpt2\") pod \"43707459-1078-4789-9cb5-b40d41b41d97\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.310570 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ssh-key\") pod \"43707459-1078-4789-9cb5-b40d41b41d97\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.310725 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-inventory\") pod \"43707459-1078-4789-9cb5-b40d41b41d97\" (UID: \"43707459-1078-4789-9cb5-b40d41b41d97\") " Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.320811 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43707459-1078-4789-9cb5-b40d41b41d97-kube-api-access-cnpt2" (OuterVolumeSpecName: "kube-api-access-cnpt2") pod "43707459-1078-4789-9cb5-b40d41b41d97" (UID: "43707459-1078-4789-9cb5-b40d41b41d97"). InnerVolumeSpecName "kube-api-access-cnpt2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.325088 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ceph" (OuterVolumeSpecName: "ceph") pod "43707459-1078-4789-9cb5-b40d41b41d97" (UID: "43707459-1078-4789-9cb5-b40d41b41d97"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.339403 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "43707459-1078-4789-9cb5-b40d41b41d97" (UID: "43707459-1078-4789-9cb5-b40d41b41d97"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.348330 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-inventory" (OuterVolumeSpecName: "inventory") pod "43707459-1078-4789-9cb5-b40d41b41d97" (UID: "43707459-1078-4789-9cb5-b40d41b41d97"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.413449 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.413480 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnpt2\" (UniqueName: \"kubernetes.io/projected/43707459-1078-4789-9cb5-b40d41b41d97-kube-api-access-cnpt2\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.413490 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.413499 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43707459-1078-4789-9cb5-b40d41b41d97-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.728626 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" event={"ID":"43707459-1078-4789-9cb5-b40d41b41d97","Type":"ContainerDied","Data":"ad00f15b0a2bd37f4c32ee86452e6138709942b6579563abdcd070282ed2a392"} Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.729008 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad00f15b0a2bd37f4c32ee86452e6138709942b6579563abdcd070282ed2a392" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.728659 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-4dch6" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.861137 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l"] Nov 25 16:00:31 crc kubenswrapper[4800]: E1125 16:00:31.861720 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43707459-1078-4789-9cb5-b40d41b41d97" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.861745 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="43707459-1078-4789-9cb5-b40d41b41d97" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:00:31 crc kubenswrapper[4800]: E1125 16:00:31.861794 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c852c73-a4a0-470b-a46d-98a1d7408f72" containerName="collect-profiles" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.861804 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c852c73-a4a0-470b-a46d-98a1d7408f72" containerName="collect-profiles" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.862065 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="43707459-1078-4789-9cb5-b40d41b41d97" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.862093 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c852c73-a4a0-470b-a46d-98a1d7408f72" containerName="collect-profiles" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.863068 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.869496 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.869858 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.871155 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.871281 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.871433 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:00:31 crc kubenswrapper[4800]: I1125 16:00:31.880671 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l"] Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.031929 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.032001 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ceph\") 
pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.032137 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.032174 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q542v\" (UniqueName: \"kubernetes.io/projected/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-kube-api-access-q542v\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.135803 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.135930 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q542v\" (UniqueName: \"kubernetes.io/projected/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-kube-api-access-q542v\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.136272 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.136372 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.140403 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.141265 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: 
\"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.141276 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.167639 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q542v\" (UniqueName: \"kubernetes.io/projected/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-kube-api-access-q542v\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.195426 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:32 crc kubenswrapper[4800]: I1125 16:00:32.768543 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l"] Nov 25 16:00:32 crc kubenswrapper[4800]: W1125 16:00:32.772053 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5673027c_e855_4cd8_8ac8_ebbb5b6f0fee.slice/crio-93bc3f9c8ceb38a67b846436e20750a6085d7b219011bc7453b120c352e3ad90 WatchSource:0}: Error finding container 93bc3f9c8ceb38a67b846436e20750a6085d7b219011bc7453b120c352e3ad90: Status 404 returned error can't find the container with id 93bc3f9c8ceb38a67b846436e20750a6085d7b219011bc7453b120c352e3ad90 Nov 25 16:00:33 crc kubenswrapper[4800]: I1125 16:00:33.749143 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" event={"ID":"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee","Type":"ContainerStarted","Data":"337e4e9b4143f7946a6cc41bfeac2608a00a47f74d28d0f6a02032fbd933ac24"} Nov 25 16:00:33 crc kubenswrapper[4800]: I1125 16:00:33.749445 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" event={"ID":"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee","Type":"ContainerStarted","Data":"93bc3f9c8ceb38a67b846436e20750a6085d7b219011bc7453b120c352e3ad90"} Nov 25 16:00:33 crc kubenswrapper[4800]: I1125 16:00:33.775374 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" podStartSLOduration=2.267040626 podStartE2EDuration="2.775356977s" podCreationTimestamp="2025-11-25 16:00:31 +0000 UTC" firstStartedPulling="2025-11-25 16:00:32.775018887 +0000 UTC m=+2593.829427389" lastFinishedPulling="2025-11-25 16:00:33.283335258 +0000 UTC m=+2594.337743740" observedRunningTime="2025-11-25 16:00:33.766881148 +0000 UTC m=+2594.821289630" watchObservedRunningTime="2025-11-25 16:00:33.775356977 +0000 UTC m=+2594.829765449" Nov 25 16:00:35 crc kubenswrapper[4800]: I1125 16:00:35.785917 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:00:35 crc kubenswrapper[4800]: E1125 16:00:35.788019 4800 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:00:37 crc kubenswrapper[4800]: I1125 16:00:37.784322 4800 generic.go:334] "Generic (PLEG): container finished" podID="5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" containerID="337e4e9b4143f7946a6cc41bfeac2608a00a47f74d28d0f6a02032fbd933ac24" exitCode=0 Nov 25 16:00:37 crc kubenswrapper[4800]: I1125 16:00:37.798346 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" event={"ID":"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee","Type":"ContainerDied","Data":"337e4e9b4143f7946a6cc41bfeac2608a00a47f74d28d0f6a02032fbd933ac24"} Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.256430 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.427791 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-inventory\") pod \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.428017 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ceph\") pod \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.428161 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q542v\" (UniqueName: \"kubernetes.io/projected/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-kube-api-access-q542v\") pod \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.428241 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ssh-key\") pod \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\" (UID: \"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee\") " Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.433738 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ceph" (OuterVolumeSpecName: "ceph") pod "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" (UID: "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.435366 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-kube-api-access-q542v" (OuterVolumeSpecName: "kube-api-access-q542v") pod "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" (UID: "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee"). InnerVolumeSpecName "kube-api-access-q542v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.465354 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" (UID: "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.475858 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-inventory" (OuterVolumeSpecName: "inventory") pod "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" (UID: "5673027c-e855-4cd8-8ac8-ebbb5b6f0fee"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.530828 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.530870 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.530880 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q542v\" (UniqueName: \"kubernetes.io/projected/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-kube-api-access-q542v\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.530890 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5673027c-e855-4cd8-8ac8-ebbb5b6f0fee-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.804525 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" event={"ID":"5673027c-e855-4cd8-8ac8-ebbb5b6f0fee","Type":"ContainerDied","Data":"93bc3f9c8ceb38a67b846436e20750a6085d7b219011bc7453b120c352e3ad90"} Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.804571 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93bc3f9c8ceb38a67b846436e20750a6085d7b219011bc7453b120c352e3ad90" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.804622 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.908635 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g"] Nov 25 16:00:39 crc kubenswrapper[4800]: E1125 16:00:39.909409 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.909504 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.909863 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5673027c-e855-4cd8-8ac8-ebbb5b6f0fee" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.910929 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.913820 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.914102 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.914472 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.915030 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.916097 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.921037 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g"] Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.954672 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.954735 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.954848 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " 
pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:39 crc kubenswrapper[4800]: I1125 16:00:39.955169 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8d72\" (UniqueName: \"kubernetes.io/projected/4d64fe72-409b-48a7-88a0-0a35d4c86918-kube-api-access-t8d72\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.056222 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.056589 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8d72\" (UniqueName: \"kubernetes.io/projected/4d64fe72-409b-48a7-88a0-0a35d4c86918-kube-api-access-t8d72\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.056681 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.056819 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.062218 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.064331 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.064602 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc 
kubenswrapper[4800]: I1125 16:00:40.073112 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8d72\" (UniqueName: \"kubernetes.io/projected/4d64fe72-409b-48a7-88a0-0a35d4c86918-kube-api-access-t8d72\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.226943 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.784527 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g"] Nov 25 16:00:40 crc kubenswrapper[4800]: I1125 16:00:40.813387 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" event={"ID":"4d64fe72-409b-48a7-88a0-0a35d4c86918","Type":"ContainerStarted","Data":"00cfcceca2f3ab512c784313b2e864e20283880256dbdb0f95c6050634b31ade"} Nov 25 16:00:41 crc kubenswrapper[4800]: I1125 16:00:41.838396 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" event={"ID":"4d64fe72-409b-48a7-88a0-0a35d4c86918","Type":"ContainerStarted","Data":"9b2e18a9021f32f6bcdcc604639855681771bb364efce455b68418de1dc8551a"} Nov 25 16:00:41 crc kubenswrapper[4800]: I1125 16:00:41.858383 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" podStartSLOduration=2.112394461 podStartE2EDuration="2.858046321s" podCreationTimestamp="2025-11-25 16:00:39 +0000 UTC" firstStartedPulling="2025-11-25 16:00:40.792397799 +0000 UTC m=+2601.846806281" lastFinishedPulling="2025-11-25 16:00:41.538049659 +0000 UTC m=+2602.592458141" observedRunningTime="2025-11-25 16:00:41.848371899 +0000 UTC m=+2602.902780381" watchObservedRunningTime="2025-11-25 16:00:41.858046321 +0000 UTC m=+2602.912454803" Nov 25 16:00:48 crc kubenswrapper[4800]: I1125 16:00:48.785355 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:00:48 crc kubenswrapper[4800]: E1125 16:00:48.786146 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.150227 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401441-6vqq9"] Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.152769 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.172680 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401441-6vqq9"] Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.353421 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-fernet-keys\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.353499 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-combined-ca-bundle\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.353566 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-config-data\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.353597 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz65d\" (UniqueName: \"kubernetes.io/projected/6e97e4f5-608b-41f3-94c0-bee108e519ea-kube-api-access-zz65d\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.456103 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-fernet-keys\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.456483 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-combined-ca-bundle\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.456548 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-config-data\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.456580 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz65d\" (UniqueName: \"kubernetes.io/projected/6e97e4f5-608b-41f3-94c0-bee108e519ea-kube-api-access-zz65d\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.465930 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-config-data\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.469029 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-fernet-keys\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.475232 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-combined-ca-bundle\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.482057 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz65d\" (UniqueName: \"kubernetes.io/projected/6e97e4f5-608b-41f3-94c0-bee108e519ea-kube-api-access-zz65d\") pod \"keystone-cron-29401441-6vqq9\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:00 crc kubenswrapper[4800]: I1125 16:01:00.518188 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:01 crc kubenswrapper[4800]: I1125 16:01:01.040491 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401441-6vqq9"] Nov 25 16:01:02 crc kubenswrapper[4800]: I1125 16:01:02.055176 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401441-6vqq9" event={"ID":"6e97e4f5-608b-41f3-94c0-bee108e519ea","Type":"ContainerStarted","Data":"43cbcfa24a329747cbac328b09eeea0bfefe8455a7eca18c7b7bc39c5f4becb7"} Nov 25 16:01:02 crc kubenswrapper[4800]: I1125 16:01:02.055675 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401441-6vqq9" event={"ID":"6e97e4f5-608b-41f3-94c0-bee108e519ea","Type":"ContainerStarted","Data":"9bd820c49558b4f648c1a06137e4ad5b9838cc357eee035dc2d51a2a509171cb"} Nov 25 16:01:02 crc kubenswrapper[4800]: I1125 16:01:02.084812 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401441-6vqq9" podStartSLOduration=2.08479439 podStartE2EDuration="2.08479439s" podCreationTimestamp="2025-11-25 16:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:01:02.079146467 +0000 UTC m=+2623.133554959" watchObservedRunningTime="2025-11-25 16:01:02.08479439 +0000 UTC m=+2623.139202872" Nov 25 16:01:03 crc kubenswrapper[4800]: I1125 16:01:03.785664 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:01:03 crc kubenswrapper[4800]: E1125 16:01:03.786209 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:01:04 crc kubenswrapper[4800]: I1125 16:01:04.075368 4800 generic.go:334] "Generic (PLEG): container finished" podID="6e97e4f5-608b-41f3-94c0-bee108e519ea" containerID="43cbcfa24a329747cbac328b09eeea0bfefe8455a7eca18c7b7bc39c5f4becb7" exitCode=0 Nov 25 16:01:04 crc kubenswrapper[4800]: I1125 16:01:04.075423 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401441-6vqq9" event={"ID":"6e97e4f5-608b-41f3-94c0-bee108e519ea","Type":"ContainerDied","Data":"43cbcfa24a329747cbac328b09eeea0bfefe8455a7eca18c7b7bc39c5f4becb7"} Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.405993 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.475115 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-fernet-keys\") pod \"6e97e4f5-608b-41f3-94c0-bee108e519ea\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.475247 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zz65d\" (UniqueName: \"kubernetes.io/projected/6e97e4f5-608b-41f3-94c0-bee108e519ea-kube-api-access-zz65d\") pod \"6e97e4f5-608b-41f3-94c0-bee108e519ea\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.475342 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-combined-ca-bundle\") pod \"6e97e4f5-608b-41f3-94c0-bee108e519ea\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.475486 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-config-data\") pod \"6e97e4f5-608b-41f3-94c0-bee108e519ea\" (UID: \"6e97e4f5-608b-41f3-94c0-bee108e519ea\") " Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.481410 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6e97e4f5-608b-41f3-94c0-bee108e519ea" (UID: "6e97e4f5-608b-41f3-94c0-bee108e519ea"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.481643 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e97e4f5-608b-41f3-94c0-bee108e519ea-kube-api-access-zz65d" (OuterVolumeSpecName: "kube-api-access-zz65d") pod "6e97e4f5-608b-41f3-94c0-bee108e519ea" (UID: "6e97e4f5-608b-41f3-94c0-bee108e519ea"). InnerVolumeSpecName "kube-api-access-zz65d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.508063 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e97e4f5-608b-41f3-94c0-bee108e519ea" (UID: "6e97e4f5-608b-41f3-94c0-bee108e519ea"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.533873 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-config-data" (OuterVolumeSpecName: "config-data") pod "6e97e4f5-608b-41f3-94c0-bee108e519ea" (UID: "6e97e4f5-608b-41f3-94c0-bee108e519ea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.577987 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.578034 4800 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.578048 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zz65d\" (UniqueName: \"kubernetes.io/projected/6e97e4f5-608b-41f3-94c0-bee108e519ea-kube-api-access-zz65d\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:05 crc kubenswrapper[4800]: I1125 16:01:05.578063 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97e4f5-608b-41f3-94c0-bee108e519ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:06 crc kubenswrapper[4800]: I1125 16:01:06.094186 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401441-6vqq9" event={"ID":"6e97e4f5-608b-41f3-94c0-bee108e519ea","Type":"ContainerDied","Data":"9bd820c49558b4f648c1a06137e4ad5b9838cc357eee035dc2d51a2a509171cb"} Nov 25 16:01:06 crc kubenswrapper[4800]: I1125 16:01:06.094240 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bd820c49558b4f648c1a06137e4ad5b9838cc357eee035dc2d51a2a509171cb" Nov 25 16:01:06 crc kubenswrapper[4800]: I1125 16:01:06.094317 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401441-6vqq9" Nov 25 16:01:14 crc kubenswrapper[4800]: I1125 16:01:14.786999 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:01:14 crc kubenswrapper[4800]: E1125 16:01:14.788686 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:01:25 crc kubenswrapper[4800]: I1125 16:01:25.284046 4800 generic.go:334] "Generic (PLEG): container finished" podID="4d64fe72-409b-48a7-88a0-0a35d4c86918" containerID="9b2e18a9021f32f6bcdcc604639855681771bb364efce455b68418de1dc8551a" exitCode=0 Nov 25 16:01:25 crc kubenswrapper[4800]: I1125 16:01:25.284094 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" event={"ID":"4d64fe72-409b-48a7-88a0-0a35d4c86918","Type":"ContainerDied","Data":"9b2e18a9021f32f6bcdcc604639855681771bb364efce455b68418de1dc8551a"} Nov 25 16:01:26 crc kubenswrapper[4800]: I1125 16:01:26.789661 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:01:26 crc kubenswrapper[4800]: E1125 16:01:26.791154 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.052900 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.161158 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ssh-key\") pod \"4d64fe72-409b-48a7-88a0-0a35d4c86918\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.161258 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8d72\" (UniqueName: \"kubernetes.io/projected/4d64fe72-409b-48a7-88a0-0a35d4c86918-kube-api-access-t8d72\") pod \"4d64fe72-409b-48a7-88a0-0a35d4c86918\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.161320 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ceph\") pod \"4d64fe72-409b-48a7-88a0-0a35d4c86918\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.161351 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-inventory\") pod \"4d64fe72-409b-48a7-88a0-0a35d4c86918\" (UID: \"4d64fe72-409b-48a7-88a0-0a35d4c86918\") " Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.169667 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d64fe72-409b-48a7-88a0-0a35d4c86918-kube-api-access-t8d72" (OuterVolumeSpecName: "kube-api-access-t8d72") pod "4d64fe72-409b-48a7-88a0-0a35d4c86918" (UID: "4d64fe72-409b-48a7-88a0-0a35d4c86918"). InnerVolumeSpecName "kube-api-access-t8d72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.170081 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ceph" (OuterVolumeSpecName: "ceph") pod "4d64fe72-409b-48a7-88a0-0a35d4c86918" (UID: "4d64fe72-409b-48a7-88a0-0a35d4c86918"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.199453 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-inventory" (OuterVolumeSpecName: "inventory") pod "4d64fe72-409b-48a7-88a0-0a35d4c86918" (UID: "4d64fe72-409b-48a7-88a0-0a35d4c86918"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.203823 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4d64fe72-409b-48a7-88a0-0a35d4c86918" (UID: "4d64fe72-409b-48a7-88a0-0a35d4c86918"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.264688 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.266168 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8d72\" (UniqueName: \"kubernetes.io/projected/4d64fe72-409b-48a7-88a0-0a35d4c86918-kube-api-access-t8d72\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.266218 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.266236 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4d64fe72-409b-48a7-88a0-0a35d4c86918-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.306519 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" event={"ID":"4d64fe72-409b-48a7-88a0-0a35d4c86918","Type":"ContainerDied","Data":"00cfcceca2f3ab512c784313b2e864e20283880256dbdb0f95c6050634b31ade"} Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.306581 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00cfcceca2f3ab512c784313b2e864e20283880256dbdb0f95c6050634b31ade" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.306666 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.423530 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-ddspx"] Nov 25 16:01:27 crc kubenswrapper[4800]: E1125 16:01:27.423913 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d64fe72-409b-48a7-88a0-0a35d4c86918" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.423928 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d64fe72-409b-48a7-88a0-0a35d4c86918" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:01:27 crc kubenswrapper[4800]: E1125 16:01:27.423951 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e97e4f5-608b-41f3-94c0-bee108e519ea" containerName="keystone-cron" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.423958 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e97e4f5-608b-41f3-94c0-bee108e519ea" containerName="keystone-cron" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.424111 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e97e4f5-608b-41f3-94c0-bee108e519ea" containerName="keystone-cron" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.424126 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d64fe72-409b-48a7-88a0-0a35d4c86918" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.424724 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.428202 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.428293 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.428446 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.428777 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.429134 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.442806 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-ddspx"] Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.573029 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ceph\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.573091 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.582297 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.582484 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdbbn\" (UniqueName: \"kubernetes.io/projected/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-kube-api-access-xdbbn\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.686550 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ceph\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.686653 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" 
(UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.686737 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.686814 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdbbn\" (UniqueName: \"kubernetes.io/projected/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-kube-api-access-xdbbn\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.692555 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.692806 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ceph\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.699003 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.708202 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdbbn\" (UniqueName: \"kubernetes.io/projected/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-kube-api-access-xdbbn\") pod \"ssh-known-hosts-edpm-deployment-ddspx\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:27 crc kubenswrapper[4800]: I1125 16:01:27.746590 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:28 crc kubenswrapper[4800]: I1125 16:01:28.415806 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-ddspx"] Nov 25 16:01:29 crc kubenswrapper[4800]: I1125 16:01:29.329330 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" event={"ID":"1c5fea3a-9dfb-4d9f-8401-d9769c59d563","Type":"ContainerStarted","Data":"06040a1d74cb9350f319d108b96afac9bb162e60220f16230453fd291451e21b"} Nov 25 16:01:29 crc kubenswrapper[4800]: I1125 16:01:29.330302 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" event={"ID":"1c5fea3a-9dfb-4d9f-8401-d9769c59d563","Type":"ContainerStarted","Data":"c89d2f130ec3a5b0d5ec6b7a90265c88332ded62caf6c6ba65898c1b113a008b"} Nov 25 16:01:29 crc kubenswrapper[4800]: I1125 16:01:29.360237 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" podStartSLOduration=1.900426755 podStartE2EDuration="2.360199219s" podCreationTimestamp="2025-11-25 16:01:27 +0000 UTC" firstStartedPulling="2025-11-25 16:01:28.43069994 +0000 UTC m=+2649.485108422" lastFinishedPulling="2025-11-25 16:01:28.890472404 +0000 UTC m=+2649.944880886" observedRunningTime="2025-11-25 16:01:29.356781296 +0000 UTC m=+2650.411189798" watchObservedRunningTime="2025-11-25 16:01:29.360199219 +0000 UTC m=+2650.414607701" Nov 25 16:01:38 crc kubenswrapper[4800]: I1125 16:01:38.421733 4800 generic.go:334] "Generic (PLEG): container finished" podID="1c5fea3a-9dfb-4d9f-8401-d9769c59d563" containerID="06040a1d74cb9350f319d108b96afac9bb162e60220f16230453fd291451e21b" exitCode=0 Nov 25 16:01:38 crc kubenswrapper[4800]: I1125 16:01:38.422028 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" event={"ID":"1c5fea3a-9dfb-4d9f-8401-d9769c59d563","Type":"ContainerDied","Data":"06040a1d74cb9350f319d108b96afac9bb162e60220f16230453fd291451e21b"} Nov 25 16:01:39 crc kubenswrapper[4800]: I1125 16:01:39.960796 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.064812 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ssh-key-openstack-edpm-ipam\") pod \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.065033 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-inventory-0\") pod \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.065082 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdbbn\" (UniqueName: \"kubernetes.io/projected/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-kube-api-access-xdbbn\") pod \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.065285 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ceph\") pod \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\" (UID: \"1c5fea3a-9dfb-4d9f-8401-d9769c59d563\") " Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.078302 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-kube-api-access-xdbbn" (OuterVolumeSpecName: "kube-api-access-xdbbn") pod "1c5fea3a-9dfb-4d9f-8401-d9769c59d563" (UID: "1c5fea3a-9dfb-4d9f-8401-d9769c59d563"). InnerVolumeSpecName "kube-api-access-xdbbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.087092 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ceph" (OuterVolumeSpecName: "ceph") pod "1c5fea3a-9dfb-4d9f-8401-d9769c59d563" (UID: "1c5fea3a-9dfb-4d9f-8401-d9769c59d563"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.129097 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1c5fea3a-9dfb-4d9f-8401-d9769c59d563" (UID: "1c5fea3a-9dfb-4d9f-8401-d9769c59d563"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.169615 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "1c5fea3a-9dfb-4d9f-8401-d9769c59d563" (UID: "1c5fea3a-9dfb-4d9f-8401-d9769c59d563"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.169647 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.169738 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdbbn\" (UniqueName: \"kubernetes.io/projected/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-kube-api-access-xdbbn\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.169754 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.271812 4800 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1c5fea3a-9dfb-4d9f-8401-d9769c59d563-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.444516 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" event={"ID":"1c5fea3a-9dfb-4d9f-8401-d9769c59d563","Type":"ContainerDied","Data":"c89d2f130ec3a5b0d5ec6b7a90265c88332ded62caf6c6ba65898c1b113a008b"} Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.444966 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c89d2f130ec3a5b0d5ec6b7a90265c88332ded62caf6c6ba65898c1b113a008b" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.444661 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-ddspx" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.574371 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv"] Nov 25 16:01:40 crc kubenswrapper[4800]: E1125 16:01:40.574825 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c5fea3a-9dfb-4d9f-8401-d9769c59d563" containerName="ssh-known-hosts-edpm-deployment" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.574872 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c5fea3a-9dfb-4d9f-8401-d9769c59d563" containerName="ssh-known-hosts-edpm-deployment" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.575035 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c5fea3a-9dfb-4d9f-8401-d9769c59d563" containerName="ssh-known-hosts-edpm-deployment" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.575680 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.581761 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.582042 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.582333 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.583081 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.583245 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.590747 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv"] Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.679285 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.679361 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.679485 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.679523 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkkpb\" (UniqueName: \"kubernetes.io/projected/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-kube-api-access-dkkpb\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.781869 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.781930 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ceph\") 
pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.781986 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.782020 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkkpb\" (UniqueName: \"kubernetes.io/projected/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-kube-api-access-dkkpb\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.786729 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:01:40 crc kubenswrapper[4800]: E1125 16:01:40.787393 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.788397 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.788576 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.793651 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.804678 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkkpb\" (UniqueName: \"kubernetes.io/projected/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-kube-api-access-dkkpb\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-qwbdv\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:40 crc kubenswrapper[4800]: I1125 16:01:40.903508 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:41 crc kubenswrapper[4800]: I1125 16:01:41.527719 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv"] Nov 25 16:01:42 crc kubenswrapper[4800]: I1125 16:01:42.466106 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" event={"ID":"932a8ec2-09ef-4b4e-8dc1-0a2342efb164","Type":"ContainerStarted","Data":"bd0c052b9a546cc2a4ef3d5347afe245be77e705eba4edfd53c25ae12267965f"} Nov 25 16:01:43 crc kubenswrapper[4800]: I1125 16:01:43.478099 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" event={"ID":"932a8ec2-09ef-4b4e-8dc1-0a2342efb164","Type":"ContainerStarted","Data":"98b1583a5a5ef7d4cf932ab45befd0313ddc8a4928ddc0caa57bde439e349f53"} Nov 25 16:01:43 crc kubenswrapper[4800]: I1125 16:01:43.509171 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" podStartSLOduration=2.899791047 podStartE2EDuration="3.509141679s" podCreationTimestamp="2025-11-25 16:01:40 +0000 UTC" firstStartedPulling="2025-11-25 16:01:41.543328863 +0000 UTC m=+2662.597737345" lastFinishedPulling="2025-11-25 16:01:42.152679495 +0000 UTC m=+2663.207087977" observedRunningTime="2025-11-25 16:01:43.50327548 +0000 UTC m=+2664.557683992" watchObservedRunningTime="2025-11-25 16:01:43.509141679 +0000 UTC m=+2664.563550161" Nov 25 16:01:50 crc kubenswrapper[4800]: I1125 16:01:50.549818 4800 generic.go:334] "Generic (PLEG): container finished" podID="932a8ec2-09ef-4b4e-8dc1-0a2342efb164" containerID="98b1583a5a5ef7d4cf932ab45befd0313ddc8a4928ddc0caa57bde439e349f53" exitCode=0 Nov 25 16:01:50 crc kubenswrapper[4800]: I1125 16:01:50.550706 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" event={"ID":"932a8ec2-09ef-4b4e-8dc1-0a2342efb164","Type":"ContainerDied","Data":"98b1583a5a5ef7d4cf932ab45befd0313ddc8a4928ddc0caa57bde439e349f53"} Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.065679 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.173522 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkkpb\" (UniqueName: \"kubernetes.io/projected/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-kube-api-access-dkkpb\") pod \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.173655 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ceph\") pod \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.173751 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-inventory\") pod \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.173795 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ssh-key\") pod \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\" (UID: \"932a8ec2-09ef-4b4e-8dc1-0a2342efb164\") " Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.181688 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-kube-api-access-dkkpb" (OuterVolumeSpecName: "kube-api-access-dkkpb") pod "932a8ec2-09ef-4b4e-8dc1-0a2342efb164" (UID: "932a8ec2-09ef-4b4e-8dc1-0a2342efb164"). InnerVolumeSpecName "kube-api-access-dkkpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.182080 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ceph" (OuterVolumeSpecName: "ceph") pod "932a8ec2-09ef-4b4e-8dc1-0a2342efb164" (UID: "932a8ec2-09ef-4b4e-8dc1-0a2342efb164"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.222184 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "932a8ec2-09ef-4b4e-8dc1-0a2342efb164" (UID: "932a8ec2-09ef-4b4e-8dc1-0a2342efb164"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.222801 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-inventory" (OuterVolumeSpecName: "inventory") pod "932a8ec2-09ef-4b4e-8dc1-0a2342efb164" (UID: "932a8ec2-09ef-4b4e-8dc1-0a2342efb164"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.279522 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.279572 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkkpb\" (UniqueName: \"kubernetes.io/projected/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-kube-api-access-dkkpb\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.279590 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.279602 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/932a8ec2-09ef-4b4e-8dc1-0a2342efb164-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.569487 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" event={"ID":"932a8ec2-09ef-4b4e-8dc1-0a2342efb164","Type":"ContainerDied","Data":"bd0c052b9a546cc2a4ef3d5347afe245be77e705eba4edfd53c25ae12267965f"} Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.569538 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd0c052b9a546cc2a4ef3d5347afe245be77e705eba4edfd53c25ae12267965f" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.569556 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-qwbdv" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.762967 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864"] Nov 25 16:01:52 crc kubenswrapper[4800]: E1125 16:01:52.763882 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932a8ec2-09ef-4b4e-8dc1-0a2342efb164" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.763912 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="932a8ec2-09ef-4b4e-8dc1-0a2342efb164" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.764279 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="932a8ec2-09ef-4b4e-8dc1-0a2342efb164" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.765281 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.774420 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.774773 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.774920 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.775069 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.775188 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.781977 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864"] Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.786711 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:01:52 crc kubenswrapper[4800]: E1125 16:01:52.787058 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.893340 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.893397 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.893502 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hdzz\" (UniqueName: \"kubernetes.io/projected/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-kube-api-access-4hdzz\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.893549 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " 
pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.995872 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.995930 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.996022 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hdzz\" (UniqueName: \"kubernetes.io/projected/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-kube-api-access-4hdzz\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:52 crc kubenswrapper[4800]: I1125 16:01:52.996083 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:53 crc kubenswrapper[4800]: I1125 16:01:53.002602 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:53 crc kubenswrapper[4800]: I1125 16:01:53.003052 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:53 crc kubenswrapper[4800]: I1125 16:01:53.003291 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:53 crc kubenswrapper[4800]: I1125 16:01:53.026448 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hdzz\" (UniqueName: \"kubernetes.io/projected/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-kube-api-access-4hdzz\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z7864\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:53 crc kubenswrapper[4800]: I1125 16:01:53.111376 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:01:53 crc kubenswrapper[4800]: W1125 16:01:53.586304 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44834c3e_e154_47ea_9c26_62f7f7ee5cb8.slice/crio-0e596482f5a7f7214bb6098b8abbd16cd97d5717e15f6f7a52e5fe37a75706e4 WatchSource:0}: Error finding container 0e596482f5a7f7214bb6098b8abbd16cd97d5717e15f6f7a52e5fe37a75706e4: Status 404 returned error can't find the container with id 0e596482f5a7f7214bb6098b8abbd16cd97d5717e15f6f7a52e5fe37a75706e4 Nov 25 16:01:53 crc kubenswrapper[4800]: I1125 16:01:53.588716 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864"] Nov 25 16:01:54 crc kubenswrapper[4800]: I1125 16:01:54.615446 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" event={"ID":"44834c3e-e154-47ea-9c26-62f7f7ee5cb8","Type":"ContainerStarted","Data":"0e596482f5a7f7214bb6098b8abbd16cd97d5717e15f6f7a52e5fe37a75706e4"} Nov 25 16:01:56 crc kubenswrapper[4800]: I1125 16:01:56.639886 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" event={"ID":"44834c3e-e154-47ea-9c26-62f7f7ee5cb8","Type":"ContainerStarted","Data":"698f7011195e1b263535ff68be1c2c4a1344212bb3803000afd13122753dda14"} Nov 25 16:01:56 crc kubenswrapper[4800]: I1125 16:01:56.674945 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" podStartSLOduration=2.909808022 podStartE2EDuration="4.674923481s" podCreationTimestamp="2025-11-25 16:01:52 +0000 UTC" firstStartedPulling="2025-11-25 16:01:53.589783578 +0000 UTC m=+2674.644192060" lastFinishedPulling="2025-11-25 16:01:55.354899017 +0000 UTC m=+2676.409307519" observedRunningTime="2025-11-25 16:01:56.663382498 +0000 UTC m=+2677.717790980" watchObservedRunningTime="2025-11-25 16:01:56.674923481 +0000 UTC m=+2677.729331973" Nov 25 16:02:04 crc kubenswrapper[4800]: I1125 16:02:04.786494 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:02:04 crc kubenswrapper[4800]: E1125 16:02:04.788219 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:02:05 crc kubenswrapper[4800]: I1125 16:02:05.744698 4800 generic.go:334] "Generic (PLEG): container finished" podID="44834c3e-e154-47ea-9c26-62f7f7ee5cb8" containerID="698f7011195e1b263535ff68be1c2c4a1344212bb3803000afd13122753dda14" exitCode=0 Nov 25 16:02:05 crc kubenswrapper[4800]: I1125 16:02:05.744784 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" event={"ID":"44834c3e-e154-47ea-9c26-62f7f7ee5cb8","Type":"ContainerDied","Data":"698f7011195e1b263535ff68be1c2c4a1344212bb3803000afd13122753dda14"} Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.160405 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.346720 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ceph\") pod \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.346873 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hdzz\" (UniqueName: \"kubernetes.io/projected/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-kube-api-access-4hdzz\") pod \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.346914 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ssh-key\") pod \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.347087 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-inventory\") pod \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\" (UID: \"44834c3e-e154-47ea-9c26-62f7f7ee5cb8\") " Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.355056 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-kube-api-access-4hdzz" (OuterVolumeSpecName: "kube-api-access-4hdzz") pod "44834c3e-e154-47ea-9c26-62f7f7ee5cb8" (UID: "44834c3e-e154-47ea-9c26-62f7f7ee5cb8"). InnerVolumeSpecName "kube-api-access-4hdzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.355758 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ceph" (OuterVolumeSpecName: "ceph") pod "44834c3e-e154-47ea-9c26-62f7f7ee5cb8" (UID: "44834c3e-e154-47ea-9c26-62f7f7ee5cb8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.381125 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "44834c3e-e154-47ea-9c26-62f7f7ee5cb8" (UID: "44834c3e-e154-47ea-9c26-62f7f7ee5cb8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.384293 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-inventory" (OuterVolumeSpecName: "inventory") pod "44834c3e-e154-47ea-9c26-62f7f7ee5cb8" (UID: "44834c3e-e154-47ea-9c26-62f7f7ee5cb8"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.450253 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.450331 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.450349 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hdzz\" (UniqueName: \"kubernetes.io/projected/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-kube-api-access-4hdzz\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.450365 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44834c3e-e154-47ea-9c26-62f7f7ee5cb8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.769596 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" event={"ID":"44834c3e-e154-47ea-9c26-62f7f7ee5cb8","Type":"ContainerDied","Data":"0e596482f5a7f7214bb6098b8abbd16cd97d5717e15f6f7a52e5fe37a75706e4"} Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.769669 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e596482f5a7f7214bb6098b8abbd16cd97d5717e15f6f7a52e5fe37a75706e4" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.769712 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z7864" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.930830 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p"] Nov 25 16:02:07 crc kubenswrapper[4800]: E1125 16:02:07.931493 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44834c3e-e154-47ea-9c26-62f7f7ee5cb8" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.931514 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="44834c3e-e154-47ea-9c26-62f7f7ee5cb8" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.931722 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="44834c3e-e154-47ea-9c26-62f7f7ee5cb8" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.932492 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.934711 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.934959 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.935130 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.935201 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.936565 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.936810 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.936987 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.939236 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:02:07 crc kubenswrapper[4800]: I1125 16:02:07.952513 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p"] Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.065532 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.065607 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.065641 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.066451 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ssh-key\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.066757 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.066809 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.066905 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.066997 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.067156 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.067219 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.067299 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.067364 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2ppt\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-kube-api-access-l2ppt\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.067387 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170000 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170145 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2ppt\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-kube-api-access-l2ppt\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170223 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170347 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170403 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170454 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170567 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170682 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170726 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170758 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170798 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170892 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.170934 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 
crc kubenswrapper[4800]: I1125 16:02:08.179760 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.179787 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.180012 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.180024 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.181880 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.182271 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.183046 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.183335 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-ovn-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.184082 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.184162 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.184450 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.186353 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.189434 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2ppt\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-kube-api-access-l2ppt\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.260250 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:08 crc kubenswrapper[4800]: I1125 16:02:08.844738 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p"] Nov 25 16:02:08 crc kubenswrapper[4800]: W1125 16:02:08.848426 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c6f9a82_e6dc_4bb9_af5e_86f7c71871b1.slice/crio-84072b8809d0590b862579542e2aa1245176e9db2fbae4d76131f883bbec5b2d WatchSource:0}: Error finding container 84072b8809d0590b862579542e2aa1245176e9db2fbae4d76131f883bbec5b2d: Status 404 returned error can't find the container with id 84072b8809d0590b862579542e2aa1245176e9db2fbae4d76131f883bbec5b2d Nov 25 16:02:09 crc kubenswrapper[4800]: I1125 16:02:09.800298 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" event={"ID":"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1","Type":"ContainerStarted","Data":"84072b8809d0590b862579542e2aa1245176e9db2fbae4d76131f883bbec5b2d"} Nov 25 16:02:10 crc kubenswrapper[4800]: I1125 16:02:10.808058 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" event={"ID":"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1","Type":"ContainerStarted","Data":"2a3e9bfbaa0827f5fa73b0db0c879c887ae6165665065b5f03f016e588e39d68"} Nov 25 16:02:10 crc kubenswrapper[4800]: I1125 16:02:10.854298 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" podStartSLOduration=2.887666241 podStartE2EDuration="3.854267496s" podCreationTimestamp="2025-11-25 16:02:07 +0000 UTC" firstStartedPulling="2025-11-25 16:02:08.851978141 +0000 UTC m=+2689.906386623" lastFinishedPulling="2025-11-25 16:02:09.818579396 +0000 UTC m=+2690.872987878" observedRunningTime="2025-11-25 16:02:10.836866614 +0000 UTC m=+2691.891275116" watchObservedRunningTime="2025-11-25 16:02:10.854267496 +0000 UTC m=+2691.908676018" Nov 25 16:02:19 crc kubenswrapper[4800]: I1125 16:02:19.794685 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:02:20 crc kubenswrapper[4800]: I1125 16:02:20.944599 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"9327b11d27cb5d471850160152581b6be41da7771b8188b3e0bff16429c3b225"} Nov 25 16:02:44 crc kubenswrapper[4800]: I1125 16:02:44.632392 4800 generic.go:334] "Generic (PLEG): container finished" podID="3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" containerID="2a3e9bfbaa0827f5fa73b0db0c879c887ae6165665065b5f03f016e588e39d68" exitCode=0 Nov 25 16:02:44 crc kubenswrapper[4800]: I1125 16:02:44.632467 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" event={"ID":"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1","Type":"ContainerDied","Data":"2a3e9bfbaa0827f5fa73b0db0c879c887ae6165665065b5f03f016e588e39d68"} Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.090492 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167390 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-ovn-default-certs-0\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167490 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2ppt\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-kube-api-access-l2ppt\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167538 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-neutron-metadata-combined-ca-bundle\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167558 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-inventory\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167612 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ovn-combined-ca-bundle\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167652 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-nova-combined-ca-bundle\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167698 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-libvirt-combined-ca-bundle\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167722 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-repo-setup-combined-ca-bundle\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167762 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 
16:02:46.167793 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ssh-key\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167834 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-bootstrap-combined-ca-bundle\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167893 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ceph\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.167952 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\" (UID: \"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1\") " Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.175253 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.175673 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.176358 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.176357 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ceph" (OuterVolumeSpecName: "ceph") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.176459 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.177019 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.177073 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-kube-api-access-l2ppt" (OuterVolumeSpecName: "kube-api-access-l2ppt") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "kube-api-access-l2ppt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.177613 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.181100 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.181276 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.185123 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.197268 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-inventory" (OuterVolumeSpecName: "inventory") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.197285 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" (UID: "3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.269971 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270003 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2ppt\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-kube-api-access-l2ppt\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270014 4800 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270023 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270034 4800 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270045 4800 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270053 4800 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270062 4800 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270071 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc 
kubenswrapper[4800]: I1125 16:02:46.270080 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270088 4800 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270097 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.270106 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.659546 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" event={"ID":"3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1","Type":"ContainerDied","Data":"84072b8809d0590b862579542e2aa1245176e9db2fbae4d76131f883bbec5b2d"} Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.659622 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84072b8809d0590b862579542e2aa1245176e9db2fbae4d76131f883bbec5b2d" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.659677 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.825625 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq"] Nov 25 16:02:46 crc kubenswrapper[4800]: E1125 16:02:46.826258 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.826294 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.826589 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.827633 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.830238 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.830346 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.830392 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.830509 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.831254 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.840488 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq"] Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.883183 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.883273 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dnts\" (UniqueName: \"kubernetes.io/projected/5f049eb4-684a-4deb-8305-37d851e0431c-kube-api-access-6dnts\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.883306 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.883398 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.984096 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.984260 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.984328 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dnts\" (UniqueName: \"kubernetes.io/projected/5f049eb4-684a-4deb-8305-37d851e0431c-kube-api-access-6dnts\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.984367 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.989309 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.991186 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:46 crc kubenswrapper[4800]: I1125 16:02:46.994439 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:47 crc kubenswrapper[4800]: I1125 16:02:47.018901 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dnts\" (UniqueName: \"kubernetes.io/projected/5f049eb4-684a-4deb-8305-37d851e0431c-kube-api-access-6dnts\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:47 crc kubenswrapper[4800]: I1125 16:02:47.155941 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:47 crc kubenswrapper[4800]: I1125 16:02:47.716034 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq"] Nov 25 16:02:48 crc kubenswrapper[4800]: I1125 16:02:48.677104 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" event={"ID":"5f049eb4-684a-4deb-8305-37d851e0431c","Type":"ContainerStarted","Data":"222acb20e75b611f01dde84b26e10df5933a8f4371aa32dbd665eff0c53bb8f3"} Nov 25 16:02:48 crc kubenswrapper[4800]: I1125 16:02:48.677663 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" event={"ID":"5f049eb4-684a-4deb-8305-37d851e0431c","Type":"ContainerStarted","Data":"5805309a9def936c305a1497886d8a59e4c0b9bcb02e252b2d24f58bc7d2bd50"} Nov 25 16:02:48 crc kubenswrapper[4800]: I1125 16:02:48.697946 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" podStartSLOduration=2.195153404 podStartE2EDuration="2.697925586s" podCreationTimestamp="2025-11-25 16:02:46 +0000 UTC" firstStartedPulling="2025-11-25 16:02:47.728962759 +0000 UTC m=+2728.783371241" lastFinishedPulling="2025-11-25 16:02:48.231734941 +0000 UTC m=+2729.286143423" observedRunningTime="2025-11-25 16:02:48.697580647 +0000 UTC m=+2729.751989129" watchObservedRunningTime="2025-11-25 16:02:48.697925586 +0000 UTC m=+2729.752334068" Nov 25 16:02:54 crc kubenswrapper[4800]: I1125 16:02:54.735440 4800 generic.go:334] "Generic (PLEG): container finished" podID="5f049eb4-684a-4deb-8305-37d851e0431c" containerID="222acb20e75b611f01dde84b26e10df5933a8f4371aa32dbd665eff0c53bb8f3" exitCode=0 Nov 25 16:02:54 crc kubenswrapper[4800]: I1125 16:02:54.735552 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" event={"ID":"5f049eb4-684a-4deb-8305-37d851e0431c","Type":"ContainerDied","Data":"222acb20e75b611f01dde84b26e10df5933a8f4371aa32dbd665eff0c53bb8f3"} Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.177961 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.294258 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ssh-key\") pod \"5f049eb4-684a-4deb-8305-37d851e0431c\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.294367 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ceph\") pod \"5f049eb4-684a-4deb-8305-37d851e0431c\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.294542 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-inventory\") pod \"5f049eb4-684a-4deb-8305-37d851e0431c\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.294755 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dnts\" (UniqueName: \"kubernetes.io/projected/5f049eb4-684a-4deb-8305-37d851e0431c-kube-api-access-6dnts\") pod \"5f049eb4-684a-4deb-8305-37d851e0431c\" (UID: \"5f049eb4-684a-4deb-8305-37d851e0431c\") " Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.301725 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ceph" (OuterVolumeSpecName: "ceph") pod "5f049eb4-684a-4deb-8305-37d851e0431c" (UID: "5f049eb4-684a-4deb-8305-37d851e0431c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.303107 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f049eb4-684a-4deb-8305-37d851e0431c-kube-api-access-6dnts" (OuterVolumeSpecName: "kube-api-access-6dnts") pod "5f049eb4-684a-4deb-8305-37d851e0431c" (UID: "5f049eb4-684a-4deb-8305-37d851e0431c"). InnerVolumeSpecName "kube-api-access-6dnts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.331711 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5f049eb4-684a-4deb-8305-37d851e0431c" (UID: "5f049eb4-684a-4deb-8305-37d851e0431c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.337570 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-inventory" (OuterVolumeSpecName: "inventory") pod "5f049eb4-684a-4deb-8305-37d851e0431c" (UID: "5f049eb4-684a-4deb-8305-37d851e0431c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.401350 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.401432 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dnts\" (UniqueName: \"kubernetes.io/projected/5f049eb4-684a-4deb-8305-37d851e0431c-kube-api-access-6dnts\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.401449 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.401460 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f049eb4-684a-4deb-8305-37d851e0431c-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.759653 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" event={"ID":"5f049eb4-684a-4deb-8305-37d851e0431c","Type":"ContainerDied","Data":"5805309a9def936c305a1497886d8a59e4c0b9bcb02e252b2d24f58bc7d2bd50"} Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.759705 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5805309a9def936c305a1497886d8a59e4c0b9bcb02e252b2d24f58bc7d2bd50" Nov 25 16:02:56 crc kubenswrapper[4800]: I1125 16:02:56.759879 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.009327 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25"] Nov 25 16:02:57 crc kubenswrapper[4800]: E1125 16:02:57.010339 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f049eb4-684a-4deb-8305-37d851e0431c" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.010362 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f049eb4-684a-4deb-8305-37d851e0431c" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.010605 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f049eb4-684a-4deb-8305-37d851e0431c" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.011496 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.014015 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.014283 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.014358 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.014661 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.016844 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.017357 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.037256 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25"] Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.116310 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.116417 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.116692 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvbtd\" (UniqueName: \"kubernetes.io/projected/f1b959a3-4fef-48f0-8562-861d6acd7b9c-kube-api-access-dvbtd\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.116760 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.116920 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc 
kubenswrapper[4800]: I1125 16:02:57.117016 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.219670 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.219776 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.219882 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.219927 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.219971 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.220030 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvbtd\" (UniqueName: \"kubernetes.io/projected/f1b959a3-4fef-48f0-8562-861d6acd7b9c-kube-api-access-dvbtd\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.221082 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.224745 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.224771 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.226453 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.228653 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.254666 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvbtd\" (UniqueName: \"kubernetes.io/projected/f1b959a3-4fef-48f0-8562-861d6acd7b9c-kube-api-access-dvbtd\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-4mt25\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.339138 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:02:57 crc kubenswrapper[4800]: I1125 16:02:57.936114 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25"] Nov 25 16:02:58 crc kubenswrapper[4800]: I1125 16:02:58.783987 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" event={"ID":"f1b959a3-4fef-48f0-8562-861d6acd7b9c","Type":"ContainerStarted","Data":"ed34298ea9a1fa15f23f36d2156d5769dc43932787c8431e99d2686415b86ea3"} Nov 25 16:02:58 crc kubenswrapper[4800]: I1125 16:02:58.785017 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" event={"ID":"f1b959a3-4fef-48f0-8562-861d6acd7b9c","Type":"ContainerStarted","Data":"b2e8745bc071f9a3859f18a4d5841c0f7f8983dd51121b86b891ef77ead2c8d1"} Nov 25 16:02:58 crc kubenswrapper[4800]: I1125 16:02:58.807052 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" podStartSLOduration=2.377927088 podStartE2EDuration="2.807024692s" podCreationTimestamp="2025-11-25 16:02:56 +0000 UTC" firstStartedPulling="2025-11-25 16:02:57.927518415 +0000 UTC m=+2738.981926897" lastFinishedPulling="2025-11-25 16:02:58.356615979 +0000 UTC m=+2739.411024501" observedRunningTime="2025-11-25 16:02:58.799821516 +0000 UTC m=+2739.854230008" watchObservedRunningTime="2025-11-25 16:02:58.807024692 +0000 UTC m=+2739.861433174" Nov 25 16:04:09 crc kubenswrapper[4800]: I1125 16:04:09.562627 4800 generic.go:334] "Generic (PLEG): container finished" podID="f1b959a3-4fef-48f0-8562-861d6acd7b9c" containerID="ed34298ea9a1fa15f23f36d2156d5769dc43932787c8431e99d2686415b86ea3" exitCode=0 Nov 25 16:04:09 crc kubenswrapper[4800]: I1125 16:04:09.562763 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" event={"ID":"f1b959a3-4fef-48f0-8562-861d6acd7b9c","Type":"ContainerDied","Data":"ed34298ea9a1fa15f23f36d2156d5769dc43932787c8431e99d2686415b86ea3"} Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.104115 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.271375 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ssh-key\") pod \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.272130 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovn-combined-ca-bundle\") pod \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.272473 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvbtd\" (UniqueName: \"kubernetes.io/projected/f1b959a3-4fef-48f0-8562-861d6acd7b9c-kube-api-access-dvbtd\") pod \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.272733 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ceph\") pod \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.273166 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-inventory\") pod \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.273568 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovncontroller-config-0\") pod \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\" (UID: \"f1b959a3-4fef-48f0-8562-861d6acd7b9c\") " Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.280160 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1b959a3-4fef-48f0-8562-861d6acd7b9c-kube-api-access-dvbtd" (OuterVolumeSpecName: "kube-api-access-dvbtd") pod "f1b959a3-4fef-48f0-8562-861d6acd7b9c" (UID: "f1b959a3-4fef-48f0-8562-861d6acd7b9c"). InnerVolumeSpecName "kube-api-access-dvbtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.280780 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "f1b959a3-4fef-48f0-8562-861d6acd7b9c" (UID: "f1b959a3-4fef-48f0-8562-861d6acd7b9c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.281561 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ceph" (OuterVolumeSpecName: "ceph") pod "f1b959a3-4fef-48f0-8562-861d6acd7b9c" (UID: "f1b959a3-4fef-48f0-8562-861d6acd7b9c"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.303797 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "f1b959a3-4fef-48f0-8562-861d6acd7b9c" (UID: "f1b959a3-4fef-48f0-8562-861d6acd7b9c"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.312938 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f1b959a3-4fef-48f0-8562-861d6acd7b9c" (UID: "f1b959a3-4fef-48f0-8562-861d6acd7b9c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.315827 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-inventory" (OuterVolumeSpecName: "inventory") pod "f1b959a3-4fef-48f0-8562-861d6acd7b9c" (UID: "f1b959a3-4fef-48f0-8562-861d6acd7b9c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.376507 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvbtd\" (UniqueName: \"kubernetes.io/projected/f1b959a3-4fef-48f0-8562-861d6acd7b9c-kube-api-access-dvbtd\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.376548 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.376558 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.376568 4800 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.376576 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.376588 4800 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1b959a3-4fef-48f0-8562-861d6acd7b9c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.584601 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" event={"ID":"f1b959a3-4fef-48f0-8562-861d6acd7b9c","Type":"ContainerDied","Data":"b2e8745bc071f9a3859f18a4d5841c0f7f8983dd51121b86b891ef77ead2c8d1"} Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.584670 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2e8745bc071f9a3859f18a4d5841c0f7f8983dd51121b86b891ef77ead2c8d1" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 
16:04:11.584704 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-4mt25" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.695570 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg"] Nov 25 16:04:11 crc kubenswrapper[4800]: E1125 16:04:11.696220 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b959a3-4fef-48f0-8562-861d6acd7b9c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.696254 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b959a3-4fef-48f0-8562-861d6acd7b9c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.696525 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1b959a3-4fef-48f0-8562-861d6acd7b9c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.697489 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.700047 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.701160 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.701767 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.701967 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.702479 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.714049 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg"] Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.714918 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.715003 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.785327 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.785388 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4wfh\" (UniqueName: \"kubernetes.io/projected/3d4ff997-b0ab-44c1-8d74-3c326d41863d-kube-api-access-r4wfh\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: 
\"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.785496 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.785533 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.785574 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.785637 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.786938 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.890545 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.890651 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4wfh\" (UniqueName: \"kubernetes.io/projected/3d4ff997-b0ab-44c1-8d74-3c326d41863d-kube-api-access-r4wfh\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.891010 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.891060 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.891117 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.891182 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.891292 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.898655 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.898779 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.899475 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.900611 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.900881 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.906634 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:11 crc kubenswrapper[4800]: I1125 16:04:11.914488 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4wfh\" (UniqueName: \"kubernetes.io/projected/3d4ff997-b0ab-44c1-8d74-3c326d41863d-kube-api-access-r4wfh\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:12 crc kubenswrapper[4800]: I1125 16:04:12.036624 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:04:12 crc kubenswrapper[4800]: I1125 16:04:12.693225 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:04:12 crc kubenswrapper[4800]: I1125 16:04:12.702706 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg"] Nov 25 16:04:13 crc kubenswrapper[4800]: I1125 16:04:13.608921 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" event={"ID":"3d4ff997-b0ab-44c1-8d74-3c326d41863d","Type":"ContainerStarted","Data":"3f036ca95944e96e8c6f0cee30cd3486d4de3f55902e903e66800bb486e667e2"} Nov 25 16:04:14 crc kubenswrapper[4800]: I1125 16:04:14.624285 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" event={"ID":"3d4ff997-b0ab-44c1-8d74-3c326d41863d","Type":"ContainerStarted","Data":"a96ba9520e52fa6106c726ea5fbccb60fe219e86e1bb22d474955c31cef94740"} Nov 25 16:04:14 crc kubenswrapper[4800]: I1125 16:04:14.663291 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" podStartSLOduration=2.6801870709999998 podStartE2EDuration="3.663256162s" podCreationTimestamp="2025-11-25 16:04:11 +0000 UTC" firstStartedPulling="2025-11-25 16:04:12.692251976 +0000 UTC m=+2813.746660468" lastFinishedPulling="2025-11-25 16:04:13.675321077 +0000 UTC m=+2814.729729559" observedRunningTime="2025-11-25 16:04:14.648823497 +0000 UTC m=+2815.703231979" watchObservedRunningTime="2025-11-25 16:04:14.663256162 +0000 UTC m=+2815.717664644" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.645397 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j6wf9"] Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.649153 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.662425 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j6wf9"] Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.768605 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-utilities\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.768727 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-catalog-content\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.768758 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8hnw\" (UniqueName: \"kubernetes.io/projected/81099967-d4f5-4b37-9f7a-0a77bc5e563c-kube-api-access-f8hnw\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.871200 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-utilities\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.871431 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-catalog-content\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.871458 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8hnw\" (UniqueName: \"kubernetes.io/projected/81099967-d4f5-4b37-9f7a-0a77bc5e563c-kube-api-access-f8hnw\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.872908 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-catalog-content\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.874357 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-utilities\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.902629 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-f8hnw\" (UniqueName: \"kubernetes.io/projected/81099967-d4f5-4b37-9f7a-0a77bc5e563c-kube-api-access-f8hnw\") pod \"certified-operators-j6wf9\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:21 crc kubenswrapper[4800]: I1125 16:04:21.972890 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:22 crc kubenswrapper[4800]: I1125 16:04:22.589107 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j6wf9"] Nov 25 16:04:22 crc kubenswrapper[4800]: I1125 16:04:22.742314 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6wf9" event={"ID":"81099967-d4f5-4b37-9f7a-0a77bc5e563c","Type":"ContainerStarted","Data":"e68fa355209b4a65c39066b3bdfa33f0c7e5e45ab804187eda2fe73aa865c118"} Nov 25 16:04:23 crc kubenswrapper[4800]: I1125 16:04:23.752458 4800 generic.go:334] "Generic (PLEG): container finished" podID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerID="6eb1b0f9a822b4ac278e7c0d042fe3ec191ff0d91620e8db4df7c9a9f2c0490b" exitCode=0 Nov 25 16:04:23 crc kubenswrapper[4800]: I1125 16:04:23.752591 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6wf9" event={"ID":"81099967-d4f5-4b37-9f7a-0a77bc5e563c","Type":"ContainerDied","Data":"6eb1b0f9a822b4ac278e7c0d042fe3ec191ff0d91620e8db4df7c9a9f2c0490b"} Nov 25 16:04:25 crc kubenswrapper[4800]: I1125 16:04:25.776086 4800 generic.go:334] "Generic (PLEG): container finished" podID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerID="147f4c920c34efbda8342be973a16874b16dd576aed775b6d6c76232759631fe" exitCode=0 Nov 25 16:04:25 crc kubenswrapper[4800]: I1125 16:04:25.776209 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6wf9" event={"ID":"81099967-d4f5-4b37-9f7a-0a77bc5e563c","Type":"ContainerDied","Data":"147f4c920c34efbda8342be973a16874b16dd576aed775b6d6c76232759631fe"} Nov 25 16:04:26 crc kubenswrapper[4800]: I1125 16:04:26.794184 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6wf9" event={"ID":"81099967-d4f5-4b37-9f7a-0a77bc5e563c","Type":"ContainerStarted","Data":"d9bec7be3448e53cccc5059fe4285e38537901720aca3b59ee04302ad752c796"} Nov 25 16:04:31 crc kubenswrapper[4800]: I1125 16:04:31.973309 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:31 crc kubenswrapper[4800]: I1125 16:04:31.974199 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:32 crc kubenswrapper[4800]: I1125 16:04:32.029113 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:32 crc kubenswrapper[4800]: I1125 16:04:32.058518 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j6wf9" podStartSLOduration=8.450416502 podStartE2EDuration="11.058496572s" podCreationTimestamp="2025-11-25 16:04:21 +0000 UTC" firstStartedPulling="2025-11-25 16:04:23.755563806 +0000 UTC m=+2824.809972288" lastFinishedPulling="2025-11-25 16:04:26.363643876 +0000 UTC m=+2827.418052358" observedRunningTime="2025-11-25 
16:04:26.819058287 +0000 UTC m=+2827.873466769" watchObservedRunningTime="2025-11-25 16:04:32.058496572 +0000 UTC m=+2833.112905054" Nov 25 16:04:32 crc kubenswrapper[4800]: I1125 16:04:32.931288 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:33 crc kubenswrapper[4800]: I1125 16:04:33.028491 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j6wf9"] Nov 25 16:04:34 crc kubenswrapper[4800]: I1125 16:04:34.894818 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j6wf9" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="registry-server" containerID="cri-o://d9bec7be3448e53cccc5059fe4285e38537901720aca3b59ee04302ad752c796" gracePeriod=2 Nov 25 16:04:35 crc kubenswrapper[4800]: I1125 16:04:35.908645 4800 generic.go:334] "Generic (PLEG): container finished" podID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerID="d9bec7be3448e53cccc5059fe4285e38537901720aca3b59ee04302ad752c796" exitCode=0 Nov 25 16:04:35 crc kubenswrapper[4800]: I1125 16:04:35.908740 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6wf9" event={"ID":"81099967-d4f5-4b37-9f7a-0a77bc5e563c","Type":"ContainerDied","Data":"d9bec7be3448e53cccc5059fe4285e38537901720aca3b59ee04302ad752c796"} Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.338814 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.446827 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-utilities\") pod \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.447058 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-catalog-content\") pod \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.447230 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8hnw\" (UniqueName: \"kubernetes.io/projected/81099967-d4f5-4b37-9f7a-0a77bc5e563c-kube-api-access-f8hnw\") pod \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\" (UID: \"81099967-d4f5-4b37-9f7a-0a77bc5e563c\") " Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.459284 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-utilities" (OuterVolumeSpecName: "utilities") pod "81099967-d4f5-4b37-9f7a-0a77bc5e563c" (UID: "81099967-d4f5-4b37-9f7a-0a77bc5e563c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.459509 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81099967-d4f5-4b37-9f7a-0a77bc5e563c-kube-api-access-f8hnw" (OuterVolumeSpecName: "kube-api-access-f8hnw") pod "81099967-d4f5-4b37-9f7a-0a77bc5e563c" (UID: "81099967-d4f5-4b37-9f7a-0a77bc5e563c"). InnerVolumeSpecName "kube-api-access-f8hnw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.551391 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.551459 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8hnw\" (UniqueName: \"kubernetes.io/projected/81099967-d4f5-4b37-9f7a-0a77bc5e563c-kube-api-access-f8hnw\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.928449 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6wf9" event={"ID":"81099967-d4f5-4b37-9f7a-0a77bc5e563c","Type":"ContainerDied","Data":"e68fa355209b4a65c39066b3bdfa33f0c7e5e45ab804187eda2fe73aa865c118"} Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.928559 4800 scope.go:117] "RemoveContainer" containerID="d9bec7be3448e53cccc5059fe4285e38537901720aca3b59ee04302ad752c796" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.928581 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j6wf9" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.956169 4800 scope.go:117] "RemoveContainer" containerID="147f4c920c34efbda8342be973a16874b16dd576aed775b6d6c76232759631fe" Nov 25 16:04:36 crc kubenswrapper[4800]: I1125 16:04:36.984926 4800 scope.go:117] "RemoveContainer" containerID="6eb1b0f9a822b4ac278e7c0d042fe3ec191ff0d91620e8db4df7c9a9f2c0490b" Nov 25 16:04:37 crc kubenswrapper[4800]: I1125 16:04:37.849412 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81099967-d4f5-4b37-9f7a-0a77bc5e563c" (UID: "81099967-d4f5-4b37-9f7a-0a77bc5e563c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:04:37 crc kubenswrapper[4800]: I1125 16:04:37.897813 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81099967-d4f5-4b37-9f7a-0a77bc5e563c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:04:38 crc kubenswrapper[4800]: I1125 16:04:38.178727 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j6wf9"] Nov 25 16:04:38 crc kubenswrapper[4800]: I1125 16:04:38.189928 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j6wf9"] Nov 25 16:04:39 crc kubenswrapper[4800]: I1125 16:04:39.797388 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" path="/var/lib/kubelet/pods/81099967-d4f5-4b37-9f7a-0a77bc5e563c/volumes" Nov 25 16:04:42 crc kubenswrapper[4800]: I1125 16:04:42.648736 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:04:42 crc kubenswrapper[4800]: I1125 16:04:42.649109 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:05:12 crc kubenswrapper[4800]: I1125 16:05:12.640683 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:05:12 crc kubenswrapper[4800]: I1125 16:05:12.641934 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:05:13 crc kubenswrapper[4800]: I1125 16:05:13.357091 4800 generic.go:334] "Generic (PLEG): container finished" podID="3d4ff997-b0ab-44c1-8d74-3c326d41863d" containerID="a96ba9520e52fa6106c726ea5fbccb60fe219e86e1bb22d474955c31cef94740" exitCode=0 Nov 25 16:05:13 crc kubenswrapper[4800]: I1125 16:05:13.357277 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" event={"ID":"3d4ff997-b0ab-44c1-8d74-3c326d41863d","Type":"ContainerDied","Data":"a96ba9520e52fa6106c726ea5fbccb60fe219e86e1bb22d474955c31cef94740"} Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.809895 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.863096 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4wfh\" (UniqueName: \"kubernetes.io/projected/3d4ff997-b0ab-44c1-8d74-3c326d41863d-kube-api-access-r4wfh\") pod \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.863167 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-nova-metadata-neutron-config-0\") pod \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.863242 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-inventory\") pod \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.863394 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.863511 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-metadata-combined-ca-bundle\") pod \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.863567 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ceph\") pod \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.863664 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ssh-key\") pod \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\" (UID: \"3d4ff997-b0ab-44c1-8d74-3c326d41863d\") " Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.872184 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "3d4ff997-b0ab-44c1-8d74-3c326d41863d" (UID: "3d4ff997-b0ab-44c1-8d74-3c326d41863d"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.872730 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d4ff997-b0ab-44c1-8d74-3c326d41863d-kube-api-access-r4wfh" (OuterVolumeSpecName: "kube-api-access-r4wfh") pod "3d4ff997-b0ab-44c1-8d74-3c326d41863d" (UID: "3d4ff997-b0ab-44c1-8d74-3c326d41863d"). 
InnerVolumeSpecName "kube-api-access-r4wfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.872909 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ceph" (OuterVolumeSpecName: "ceph") pod "3d4ff997-b0ab-44c1-8d74-3c326d41863d" (UID: "3d4ff997-b0ab-44c1-8d74-3c326d41863d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.896871 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "3d4ff997-b0ab-44c1-8d74-3c326d41863d" (UID: "3d4ff997-b0ab-44c1-8d74-3c326d41863d"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.899502 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3d4ff997-b0ab-44c1-8d74-3c326d41863d" (UID: "3d4ff997-b0ab-44c1-8d74-3c326d41863d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.900537 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "3d4ff997-b0ab-44c1-8d74-3c326d41863d" (UID: "3d4ff997-b0ab-44c1-8d74-3c326d41863d"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.901603 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-inventory" (OuterVolumeSpecName: "inventory") pod "3d4ff997-b0ab-44c1-8d74-3c326d41863d" (UID: "3d4ff997-b0ab-44c1-8d74-3c326d41863d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.967310 4800 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.967699 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.967711 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.967723 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4wfh\" (UniqueName: \"kubernetes.io/projected/3d4ff997-b0ab-44c1-8d74-3c326d41863d-kube-api-access-r4wfh\") on node \"crc\" DevicePath \"\"" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.967735 4800 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.967744 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:05:14 crc kubenswrapper[4800]: I1125 16:05:14.967755 4800 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/3d4ff997-b0ab-44c1-8d74-3c326d41863d-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.399914 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" event={"ID":"3d4ff997-b0ab-44c1-8d74-3c326d41863d","Type":"ContainerDied","Data":"3f036ca95944e96e8c6f0cee30cd3486d4de3f55902e903e66800bb486e667e2"} Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.399975 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.399983 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f036ca95944e96e8c6f0cee30cd3486d4de3f55902e903e66800bb486e667e2" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.519991 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"] Nov 25 16:05:15 crc kubenswrapper[4800]: E1125 16:05:15.520565 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="registry-server" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.520594 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="registry-server" Nov 25 16:05:15 crc kubenswrapper[4800]: E1125 16:05:15.520608 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="extract-content" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.520616 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="extract-content" Nov 25 16:05:15 crc kubenswrapper[4800]: E1125 16:05:15.520637 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4ff997-b0ab-44c1-8d74-3c326d41863d" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.520646 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d4ff997-b0ab-44c1-8d74-3c326d41863d" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 16:05:15 crc kubenswrapper[4800]: E1125 16:05:15.520662 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="extract-utilities" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.520669 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="extract-utilities" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.520920 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d4ff997-b0ab-44c1-8d74-3c326d41863d" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.520939 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="81099967-d4f5-4b37-9f7a-0a77bc5e563c" containerName="registry-server" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.521665 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.525139 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.525651 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.526616 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.526917 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.527106 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.527253 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.531343 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"] Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.581129 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8n8s\" (UniqueName: \"kubernetes.io/projected/631c307a-96b7-4e9f-829d-2652277cbea1-kube-api-access-s8n8s\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.581293 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.581413 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.581583 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.581719 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc 
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.684275 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.684379 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.684444 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.684544 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8n8s\" (UniqueName: \"kubernetes.io/projected/631c307a-96b7-4e9f-829d-2652277cbea1-kube-api-access-s8n8s\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.684596 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.684640 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.690033 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.690052 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"
volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.690280 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.690476 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.690649 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.705481 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8n8s\" (UniqueName: \"kubernetes.io/projected/631c307a-96b7-4e9f-829d-2652277cbea1-kube-api-access-s8n8s\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-czdww\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:15 crc kubenswrapper[4800]: I1125 16:05:15.840602 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:05:16 crc kubenswrapper[4800]: I1125 16:05:16.240677 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww"] Nov 25 16:05:16 crc kubenswrapper[4800]: I1125 16:05:16.418147 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" event={"ID":"631c307a-96b7-4e9f-829d-2652277cbea1","Type":"ContainerStarted","Data":"285dbe773bc4704fc4b6f40cfd699e244fecf3eb5391d691689fe2151f05dede"} Nov 25 16:05:17 crc kubenswrapper[4800]: I1125 16:05:17.432303 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" event={"ID":"631c307a-96b7-4e9f-829d-2652277cbea1","Type":"ContainerStarted","Data":"d3d8be27aabc637590528590249da3ddfcd9c475d9c21c7b45f97849ca668dec"} Nov 25 16:05:17 crc kubenswrapper[4800]: I1125 16:05:17.463166 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" podStartSLOduration=2.005776986 podStartE2EDuration="2.463139225s" podCreationTimestamp="2025-11-25 16:05:15 +0000 UTC" firstStartedPulling="2025-11-25 16:05:16.249286607 +0000 UTC m=+2877.303695089" lastFinishedPulling="2025-11-25 16:05:16.706648846 +0000 UTC m=+2877.761057328" observedRunningTime="2025-11-25 16:05:17.453707572 +0000 UTC m=+2878.508116054" watchObservedRunningTime="2025-11-25 16:05:17.463139225 +0000 UTC m=+2878.517547707" Nov 25 16:05:42 crc kubenswrapper[4800]: I1125 16:05:42.640271 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:05:42 crc kubenswrapper[4800]: I1125 16:05:42.641123 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:05:42 crc kubenswrapper[4800]: I1125 16:05:42.641201 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:05:42 crc kubenswrapper[4800]: I1125 16:05:42.642250 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9327b11d27cb5d471850160152581b6be41da7771b8188b3e0bff16429c3b225"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:05:42 crc kubenswrapper[4800]: I1125 16:05:42.642352 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://9327b11d27cb5d471850160152581b6be41da7771b8188b3e0bff16429c3b225" gracePeriod=600 Nov 25 16:05:43 crc kubenswrapper[4800]: I1125 16:05:43.733008 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" 
containerID="9327b11d27cb5d471850160152581b6be41da7771b8188b3e0bff16429c3b225" exitCode=0 Nov 25 16:05:43 crc kubenswrapper[4800]: I1125 16:05:43.733059 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"9327b11d27cb5d471850160152581b6be41da7771b8188b3e0bff16429c3b225"} Nov 25 16:05:43 crc kubenswrapper[4800]: I1125 16:05:43.733550 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e"} Nov 25 16:05:43 crc kubenswrapper[4800]: I1125 16:05:43.733585 4800 scope.go:117] "RemoveContainer" containerID="7cef09031fc32eef7fe271d095094caa34d16c2d2beae52a158eb988bc0fd724" Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.604386 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-44kq2"] Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.609229 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.618044 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-44kq2"] Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.741196 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2gww\" (UniqueName: \"kubernetes.io/projected/6ac4cdb1-6909-448f-83aa-fb2de7def57a-kube-api-access-m2gww\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.741266 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-catalog-content\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.741326 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-utilities\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.843399 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2gww\" (UniqueName: \"kubernetes.io/projected/6ac4cdb1-6909-448f-83aa-fb2de7def57a-kube-api-access-m2gww\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.843459 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-catalog-content\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:05:56 crc 
Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.844163 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-utilities\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2"
Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.844363 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-catalog-content\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2"
Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.865575 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2gww\" (UniqueName: \"kubernetes.io/projected/6ac4cdb1-6909-448f-83aa-fb2de7def57a-kube-api-access-m2gww\") pod \"redhat-marketplace-44kq2\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " pod="openshift-marketplace/redhat-marketplace-44kq2"
Nov 25 16:05:56 crc kubenswrapper[4800]: I1125 16:05:56.942427 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44kq2"
Nov 25 16:05:57 crc kubenswrapper[4800]: I1125 16:05:57.242756 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-44kq2"]
Nov 25 16:05:57 crc kubenswrapper[4800]: I1125 16:05:57.889920 4800 generic.go:334] "Generic (PLEG): container finished" podID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerID="5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f" exitCode=0
Nov 25 16:05:57 crc kubenswrapper[4800]: I1125 16:05:57.890371 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44kq2" event={"ID":"6ac4cdb1-6909-448f-83aa-fb2de7def57a","Type":"ContainerDied","Data":"5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f"}
Nov 25 16:05:57 crc kubenswrapper[4800]: I1125 16:05:57.890407 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44kq2" event={"ID":"6ac4cdb1-6909-448f-83aa-fb2de7def57a","Type":"ContainerStarted","Data":"8880f9af39cf79f4ec49a40a350fec8505b4f39e51c020576e6bdd232585aa1c"}
Nov 25 16:05:58 crc kubenswrapper[4800]: I1125 16:05:58.907386 4800 generic.go:334] "Generic (PLEG): container finished" podID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerID="5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083" exitCode=0
Nov 25 16:05:58 crc kubenswrapper[4800]: I1125 16:05:58.907583 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44kq2" event={"ID":"6ac4cdb1-6909-448f-83aa-fb2de7def57a","Type":"ContainerDied","Data":"5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083"}
Nov 25 16:05:59 crc kubenswrapper[4800]: I1125 16:05:59.923598 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44kq2" event={"ID":"6ac4cdb1-6909-448f-83aa-fb2de7def57a","Type":"ContainerStarted","Data":"30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd"}
event={"ID":"6ac4cdb1-6909-448f-83aa-fb2de7def57a","Type":"ContainerStarted","Data":"30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd"} Nov 25 16:05:59 crc kubenswrapper[4800]: I1125 16:05:59.954695 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-44kq2" podStartSLOduration=2.535109952 podStartE2EDuration="3.954673964s" podCreationTimestamp="2025-11-25 16:05:56 +0000 UTC" firstStartedPulling="2025-11-25 16:05:57.893021784 +0000 UTC m=+2918.947430266" lastFinishedPulling="2025-11-25 16:05:59.312585786 +0000 UTC m=+2920.366994278" observedRunningTime="2025-11-25 16:05:59.945036096 +0000 UTC m=+2920.999444588" watchObservedRunningTime="2025-11-25 16:05:59.954673964 +0000 UTC m=+2921.009082446" Nov 25 16:06:06 crc kubenswrapper[4800]: I1125 16:06:06.943440 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:06:06 crc kubenswrapper[4800]: I1125 16:06:06.944387 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:06:07 crc kubenswrapper[4800]: I1125 16:06:07.020677 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:06:07 crc kubenswrapper[4800]: I1125 16:06:07.101779 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:06:07 crc kubenswrapper[4800]: I1125 16:06:07.268439 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-44kq2"] Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.031142 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-44kq2" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="registry-server" containerID="cri-o://30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd" gracePeriod=2 Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.619805 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.751430 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2gww\" (UniqueName: \"kubernetes.io/projected/6ac4cdb1-6909-448f-83aa-fb2de7def57a-kube-api-access-m2gww\") pod \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.751568 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-catalog-content\") pod \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.751700 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-utilities\") pod \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\" (UID: \"6ac4cdb1-6909-448f-83aa-fb2de7def57a\") " Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.752876 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-utilities" (OuterVolumeSpecName: "utilities") pod "6ac4cdb1-6909-448f-83aa-fb2de7def57a" (UID: "6ac4cdb1-6909-448f-83aa-fb2de7def57a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.759323 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ac4cdb1-6909-448f-83aa-fb2de7def57a-kube-api-access-m2gww" (OuterVolumeSpecName: "kube-api-access-m2gww") pod "6ac4cdb1-6909-448f-83aa-fb2de7def57a" (UID: "6ac4cdb1-6909-448f-83aa-fb2de7def57a"). InnerVolumeSpecName "kube-api-access-m2gww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.769375 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6ac4cdb1-6909-448f-83aa-fb2de7def57a" (UID: "6ac4cdb1-6909-448f-83aa-fb2de7def57a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.854266 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2gww\" (UniqueName: \"kubernetes.io/projected/6ac4cdb1-6909-448f-83aa-fb2de7def57a-kube-api-access-m2gww\") on node \"crc\" DevicePath \"\"" Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.854341 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:06:09 crc kubenswrapper[4800]: I1125 16:06:09.854354 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ac4cdb1-6909-448f-83aa-fb2de7def57a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.046603 4800 generic.go:334] "Generic (PLEG): container finished" podID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerID="30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd" exitCode=0 Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.047964 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44kq2" event={"ID":"6ac4cdb1-6909-448f-83aa-fb2de7def57a","Type":"ContainerDied","Data":"30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd"} Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.048062 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44kq2" event={"ID":"6ac4cdb1-6909-448f-83aa-fb2de7def57a","Type":"ContainerDied","Data":"8880f9af39cf79f4ec49a40a350fec8505b4f39e51c020576e6bdd232585aa1c"} Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.048144 4800 scope.go:117] "RemoveContainer" containerID="30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.048351 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44kq2" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.091741 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-44kq2"] Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.099008 4800 scope.go:117] "RemoveContainer" containerID="5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.099574 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-44kq2"] Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.125665 4800 scope.go:117] "RemoveContainer" containerID="5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.172496 4800 scope.go:117] "RemoveContainer" containerID="30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd" Nov 25 16:06:10 crc kubenswrapper[4800]: E1125 16:06:10.173546 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd\": container with ID starting with 30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd not found: ID does not exist" containerID="30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.173592 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd"} err="failed to get container status \"30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd\": rpc error: code = NotFound desc = could not find container \"30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd\": container with ID starting with 30fe11f3181efb32cfddd4a465bdc0b5d8538af1a8470c13abb3388beef232cd not found: ID does not exist" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.173629 4800 scope.go:117] "RemoveContainer" containerID="5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083" Nov 25 16:06:10 crc kubenswrapper[4800]: E1125 16:06:10.174175 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083\": container with ID starting with 5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083 not found: ID does not exist" containerID="5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.174226 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083"} err="failed to get container status \"5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083\": rpc error: code = NotFound desc = could not find container \"5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083\": container with ID starting with 5d91a45913216deada009476dd70e3f2e250ec9b414e7d9969c3002a10f69083 not found: ID does not exist" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.174245 4800 scope.go:117] "RemoveContainer" containerID="5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f" Nov 25 16:06:10 crc kubenswrapper[4800]: E1125 16:06:10.174581 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f\": container with ID starting with 5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f not found: ID does not exist" containerID="5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f" Nov 25 16:06:10 crc kubenswrapper[4800]: I1125 16:06:10.174607 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f"} err="failed to get container status \"5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f\": rpc error: code = NotFound desc = could not find container \"5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f\": container with ID starting with 5d4a049b22f8cb9b585d81511fa7aa5d1cc58f5d3d7091310e7dee3f4b37bd0f not found: ID does not exist" Nov 25 16:06:11 crc kubenswrapper[4800]: I1125 16:06:11.796058 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" path="/var/lib/kubelet/pods/6ac4cdb1-6909-448f-83aa-fb2de7def57a/volumes" Nov 25 16:07:42 crc kubenswrapper[4800]: I1125 16:07:42.640707 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:07:42 crc kubenswrapper[4800]: I1125 16:07:42.641566 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:08:12 crc kubenswrapper[4800]: I1125 16:08:12.640530 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:08:12 crc kubenswrapper[4800]: I1125 16:08:12.641366 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.639901 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.640728 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.640783 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.641694 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.641759 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" gracePeriod=600 Nov 25 16:08:42 crc kubenswrapper[4800]: E1125 16:08:42.778457 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.894766 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" exitCode=0 Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.894832 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e"} Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.894947 4800 scope.go:117] "RemoveContainer" containerID="9327b11d27cb5d471850160152581b6be41da7771b8188b3e0bff16429c3b225" Nov 25 16:08:42 crc kubenswrapper[4800]: I1125 16:08:42.895831 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:08:42 crc kubenswrapper[4800]: E1125 16:08:42.896528 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:08:57 crc kubenswrapper[4800]: I1125 16:08:57.786668 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:08:57 crc kubenswrapper[4800]: E1125 16:08:57.788042 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:09:09 crc 
kubenswrapper[4800]: I1125 16:09:09.794988 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:09:09 crc kubenswrapper[4800]: E1125 16:09:09.795756 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.495584 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hfhbl"] Nov 25 16:09:18 crc kubenswrapper[4800]: E1125 16:09:18.497071 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="extract-utilities" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.497089 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="extract-utilities" Nov 25 16:09:18 crc kubenswrapper[4800]: E1125 16:09:18.497120 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="extract-content" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.497129 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="extract-content" Nov 25 16:09:18 crc kubenswrapper[4800]: E1125 16:09:18.497160 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="registry-server" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.497179 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="registry-server" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.497394 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ac4cdb1-6909-448f-83aa-fb2de7def57a" containerName="registry-server" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.499309 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.513709 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hfhbl"] Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.619178 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-catalog-content\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.619782 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-utilities\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.619916 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z86x4\" (UniqueName: \"kubernetes.io/projected/4325cf3e-4e72-4659-98cc-a78598a26e81-kube-api-access-z86x4\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.690238 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cx9xf"] Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.692279 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.701616 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cx9xf"] Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.724869 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-catalog-content\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.725021 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-utilities\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.725050 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z86x4\" (UniqueName: \"kubernetes.io/projected/4325cf3e-4e72-4659-98cc-a78598a26e81-kube-api-access-z86x4\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.726053 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-catalog-content\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " 
pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.727287 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-utilities\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.759193 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z86x4\" (UniqueName: \"kubernetes.io/projected/4325cf3e-4e72-4659-98cc-a78598a26e81-kube-api-access-z86x4\") pod \"redhat-operators-hfhbl\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.825808 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.826482 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-catalog-content\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.826919 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2vnt\" (UniqueName: \"kubernetes.io/projected/e11694f9-a2e1-43e2-8dbc-e1f332902156-kube-api-access-m2vnt\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.826951 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-utilities\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.928597 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-catalog-content\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.929231 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-utilities\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.929257 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2vnt\" (UniqueName: \"kubernetes.io/projected/e11694f9-a2e1-43e2-8dbc-e1f332902156-kube-api-access-m2vnt\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.930451 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-utilities\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.930554 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-catalog-content\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:18 crc kubenswrapper[4800]: I1125 16:09:18.955719 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2vnt\" (UniqueName: \"kubernetes.io/projected/e11694f9-a2e1-43e2-8dbc-e1f332902156-kube-api-access-m2vnt\") pod \"community-operators-cx9xf\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:19 crc kubenswrapper[4800]: I1125 16:09:19.103157 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:19 crc kubenswrapper[4800]: I1125 16:09:19.481802 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hfhbl"] Nov 25 16:09:19 crc kubenswrapper[4800]: I1125 16:09:19.822560 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cx9xf"] Nov 25 16:09:20 crc kubenswrapper[4800]: I1125 16:09:20.305699 4800 generic.go:334] "Generic (PLEG): container finished" podID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerID="4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c" exitCode=0 Nov 25 16:09:20 crc kubenswrapper[4800]: I1125 16:09:20.306305 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cx9xf" event={"ID":"e11694f9-a2e1-43e2-8dbc-e1f332902156","Type":"ContainerDied","Data":"4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c"} Nov 25 16:09:20 crc kubenswrapper[4800]: I1125 16:09:20.306342 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cx9xf" event={"ID":"e11694f9-a2e1-43e2-8dbc-e1f332902156","Type":"ContainerStarted","Data":"3181c92eca163af946dc3e3848eea9f5f80770d815b7a5e6d933005f3debd3d3"} Nov 25 16:09:20 crc kubenswrapper[4800]: I1125 16:09:20.308775 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:09:20 crc kubenswrapper[4800]: I1125 16:09:20.311916 4800 generic.go:334] "Generic (PLEG): container finished" podID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerID="d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa" exitCode=0 Nov 25 16:09:20 crc kubenswrapper[4800]: I1125 16:09:20.311981 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfhbl" event={"ID":"4325cf3e-4e72-4659-98cc-a78598a26e81","Type":"ContainerDied","Data":"d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa"} Nov 25 16:09:20 crc kubenswrapper[4800]: I1125 16:09:20.312027 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfhbl" 
event={"ID":"4325cf3e-4e72-4659-98cc-a78598a26e81","Type":"ContainerStarted","Data":"105c80ea5f71fb87da64c3a0c09e5487247819ad6f8a5609a6f1f09d560e1f50"} Nov 25 16:09:22 crc kubenswrapper[4800]: I1125 16:09:22.336752 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cx9xf" event={"ID":"e11694f9-a2e1-43e2-8dbc-e1f332902156","Type":"ContainerStarted","Data":"9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3"} Nov 25 16:09:22 crc kubenswrapper[4800]: I1125 16:09:22.338970 4800 generic.go:334] "Generic (PLEG): container finished" podID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerID="8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8" exitCode=0 Nov 25 16:09:22 crc kubenswrapper[4800]: I1125 16:09:22.339050 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfhbl" event={"ID":"4325cf3e-4e72-4659-98cc-a78598a26e81","Type":"ContainerDied","Data":"8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8"} Nov 25 16:09:22 crc kubenswrapper[4800]: I1125 16:09:22.785832 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:09:22 crc kubenswrapper[4800]: E1125 16:09:22.786129 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:09:24 crc kubenswrapper[4800]: I1125 16:09:24.363432 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfhbl" event={"ID":"4325cf3e-4e72-4659-98cc-a78598a26e81","Type":"ContainerStarted","Data":"290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a"} Nov 25 16:09:24 crc kubenswrapper[4800]: I1125 16:09:24.402703 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hfhbl" podStartSLOduration=3.034474987 podStartE2EDuration="6.402679431s" podCreationTimestamp="2025-11-25 16:09:18 +0000 UTC" firstStartedPulling="2025-11-25 16:09:20.314407541 +0000 UTC m=+3121.368816023" lastFinishedPulling="2025-11-25 16:09:23.682611985 +0000 UTC m=+3124.737020467" observedRunningTime="2025-11-25 16:09:24.400356168 +0000 UTC m=+3125.454764680" watchObservedRunningTime="2025-11-25 16:09:24.402679431 +0000 UTC m=+3125.457087913" Nov 25 16:09:26 crc kubenswrapper[4800]: I1125 16:09:26.386116 4800 generic.go:334] "Generic (PLEG): container finished" podID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerID="9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3" exitCode=0 Nov 25 16:09:26 crc kubenswrapper[4800]: I1125 16:09:26.386217 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cx9xf" event={"ID":"e11694f9-a2e1-43e2-8dbc-e1f332902156","Type":"ContainerDied","Data":"9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3"} Nov 25 16:09:28 crc kubenswrapper[4800]: I1125 16:09:28.828020 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:28 crc kubenswrapper[4800]: I1125 16:09:28.828935 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:29 crc kubenswrapper[4800]: I1125 16:09:29.417392 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cx9xf" event={"ID":"e11694f9-a2e1-43e2-8dbc-e1f332902156","Type":"ContainerStarted","Data":"4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975"} Nov 25 16:09:29 crc kubenswrapper[4800]: I1125 16:09:29.447707 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cx9xf" podStartSLOduration=3.436522205 podStartE2EDuration="11.447682845s" podCreationTimestamp="2025-11-25 16:09:18 +0000 UTC" firstStartedPulling="2025-11-25 16:09:20.308450599 +0000 UTC m=+3121.362859081" lastFinishedPulling="2025-11-25 16:09:28.319611239 +0000 UTC m=+3129.374019721" observedRunningTime="2025-11-25 16:09:29.436946624 +0000 UTC m=+3130.491355096" watchObservedRunningTime="2025-11-25 16:09:29.447682845 +0000 UTC m=+3130.502091327" Nov 25 16:09:29 crc kubenswrapper[4800]: I1125 16:09:29.899591 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hfhbl" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="registry-server" probeResult="failure" output=< Nov 25 16:09:29 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:09:29 crc kubenswrapper[4800]: > Nov 25 16:09:34 crc kubenswrapper[4800]: I1125 16:09:34.467783 4800 generic.go:334] "Generic (PLEG): container finished" podID="631c307a-96b7-4e9f-829d-2652277cbea1" containerID="d3d8be27aabc637590528590249da3ddfcd9c475d9c21c7b45f97849ca668dec" exitCode=0 Nov 25 16:09:34 crc kubenswrapper[4800]: I1125 16:09:34.469668 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" event={"ID":"631c307a-96b7-4e9f-829d-2652277cbea1","Type":"ContainerDied","Data":"d3d8be27aabc637590528590249da3ddfcd9c475d9c21c7b45f97849ca668dec"} Nov 25 16:09:35 crc kubenswrapper[4800]: I1125 16:09:35.945097 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:09:35 crc kubenswrapper[4800]: I1125 16:09:35.980103 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-combined-ca-bundle\") pod \"631c307a-96b7-4e9f-829d-2652277cbea1\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " Nov 25 16:09:35 crc kubenswrapper[4800]: I1125 16:09:35.980203 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8n8s\" (UniqueName: \"kubernetes.io/projected/631c307a-96b7-4e9f-829d-2652277cbea1-kube-api-access-s8n8s\") pod \"631c307a-96b7-4e9f-829d-2652277cbea1\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " Nov 25 16:09:35 crc kubenswrapper[4800]: I1125 16:09:35.990868 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/631c307a-96b7-4e9f-829d-2652277cbea1-kube-api-access-s8n8s" (OuterVolumeSpecName: "kube-api-access-s8n8s") pod "631c307a-96b7-4e9f-829d-2652277cbea1" (UID: "631c307a-96b7-4e9f-829d-2652277cbea1"). InnerVolumeSpecName "kube-api-access-s8n8s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:09:35 crc kubenswrapper[4800]: I1125 16:09:35.991052 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "631c307a-96b7-4e9f-829d-2652277cbea1" (UID: "631c307a-96b7-4e9f-829d-2652277cbea1"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.081573 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ceph\") pod \"631c307a-96b7-4e9f-829d-2652277cbea1\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.082953 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-inventory\") pod \"631c307a-96b7-4e9f-829d-2652277cbea1\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.083043 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ssh-key\") pod \"631c307a-96b7-4e9f-829d-2652277cbea1\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.083091 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-secret-0\") pod \"631c307a-96b7-4e9f-829d-2652277cbea1\" (UID: \"631c307a-96b7-4e9f-829d-2652277cbea1\") " Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.083827 4800 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.083869 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8n8s\" (UniqueName: \"kubernetes.io/projected/631c307a-96b7-4e9f-829d-2652277cbea1-kube-api-access-s8n8s\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.086149 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ceph" (OuterVolumeSpecName: "ceph") pod "631c307a-96b7-4e9f-829d-2652277cbea1" (UID: "631c307a-96b7-4e9f-829d-2652277cbea1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.118888 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "631c307a-96b7-4e9f-829d-2652277cbea1" (UID: "631c307a-96b7-4e9f-829d-2652277cbea1"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.121457 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "631c307a-96b7-4e9f-829d-2652277cbea1" (UID: "631c307a-96b7-4e9f-829d-2652277cbea1"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.122275 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-inventory" (OuterVolumeSpecName: "inventory") pod "631c307a-96b7-4e9f-829d-2652277cbea1" (UID: "631c307a-96b7-4e9f-829d-2652277cbea1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.186732 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.186778 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.186794 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.186804 4800 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/631c307a-96b7-4e9f-829d-2652277cbea1-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.500518 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" event={"ID":"631c307a-96b7-4e9f-829d-2652277cbea1","Type":"ContainerDied","Data":"285dbe773bc4704fc4b6f40cfd699e244fecf3eb5391d691689fe2151f05dede"} Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.500591 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="285dbe773bc4704fc4b6f40cfd699e244fecf3eb5391d691689fe2151f05dede" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.500687 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-czdww" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.603557 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9"] Nov 25 16:09:36 crc kubenswrapper[4800]: E1125 16:09:36.604697 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="631c307a-96b7-4e9f-829d-2652277cbea1" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.604728 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="631c307a-96b7-4e9f-829d-2652277cbea1" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.605266 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="631c307a-96b7-4e9f-829d-2652277cbea1" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.606247 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.614102 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.614458 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-fblnb" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.614719 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.614974 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.615137 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.615372 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.615604 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.615799 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.616051 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.638287 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9"] Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.694681 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.694749 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.694777 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.694931 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.694995 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.695021 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h47ts\" (UniqueName: \"kubernetes.io/projected/a04be264-1764-4ff6-b676-688fdb0ced55-kube-api-access-h47ts\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.695064 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.695082 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.695100 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" 
(UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.695159 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.695187 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.786537 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:09:36 crc kubenswrapper[4800]: E1125 16:09:36.787020 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797324 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797450 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797487 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797513 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " 
pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797560 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797593 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h47ts\" (UniqueName: \"kubernetes.io/projected/a04be264-1764-4ff6-b676-688fdb0ced55-kube-api-access-h47ts\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797693 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797718 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797745 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.797814 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.798043 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.798730 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-ceph-nova-0\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.798823 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.802909 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.806134 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.806294 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.812640 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.813029 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.813318 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.813358 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.814239 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.816612 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h47ts\" (UniqueName: \"kubernetes.io/projected/a04be264-1764-4ff6-b676-688fdb0ced55-kube-api-access-h47ts\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:36 crc kubenswrapper[4800]: I1125 16:09:36.938248 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:09:37 crc kubenswrapper[4800]: I1125 16:09:37.480143 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9"] Nov 25 16:09:37 crc kubenswrapper[4800]: I1125 16:09:37.512142 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" event={"ID":"a04be264-1764-4ff6-b676-688fdb0ced55","Type":"ContainerStarted","Data":"a2435dc7d6c6856235995361bebab630c2b7e0cc6b6abb55bd879024dd664be6"} Nov 25 16:09:38 crc kubenswrapper[4800]: I1125 16:09:38.529900 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" event={"ID":"a04be264-1764-4ff6-b676-688fdb0ced55","Type":"ContainerStarted","Data":"691b84c1dcaaa495d81a46dc3edbbaf9d4de58fea6396b9d2aa4dfb377ce7245"} Nov 25 16:09:38 crc kubenswrapper[4800]: I1125 16:09:38.561462 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" podStartSLOduration=2.024726663 podStartE2EDuration="2.56143821s" podCreationTimestamp="2025-11-25 16:09:36 +0000 UTC" firstStartedPulling="2025-11-25 16:09:37.485610662 +0000 UTC m=+3138.540019134" lastFinishedPulling="2025-11-25 16:09:38.022322199 +0000 UTC m=+3139.076730681" observedRunningTime="2025-11-25 16:09:38.556561068 +0000 UTC m=+3139.610969590" watchObservedRunningTime="2025-11-25 16:09:38.56143821 +0000 UTC m=+3139.615846692" Nov 25 16:09:38 crc kubenswrapper[4800]: I1125 16:09:38.886171 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:38 crc kubenswrapper[4800]: I1125 16:09:38.952860 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:39 crc kubenswrapper[4800]: I1125 16:09:39.104522 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:39 crc 
kubenswrapper[4800]: I1125 16:09:39.104589 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:39 crc kubenswrapper[4800]: I1125 16:09:39.139259 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hfhbl"] Nov 25 16:09:39 crc kubenswrapper[4800]: I1125 16:09:39.178142 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:39 crc kubenswrapper[4800]: I1125 16:09:39.594001 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:40 crc kubenswrapper[4800]: I1125 16:09:40.552534 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hfhbl" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="registry-server" containerID="cri-o://290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a" gracePeriod=2 Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.048760 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.222574 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-utilities\") pod \"4325cf3e-4e72-4659-98cc-a78598a26e81\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.222717 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z86x4\" (UniqueName: \"kubernetes.io/projected/4325cf3e-4e72-4659-98cc-a78598a26e81-kube-api-access-z86x4\") pod \"4325cf3e-4e72-4659-98cc-a78598a26e81\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.222860 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-catalog-content\") pod \"4325cf3e-4e72-4659-98cc-a78598a26e81\" (UID: \"4325cf3e-4e72-4659-98cc-a78598a26e81\") " Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.223698 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-utilities" (OuterVolumeSpecName: "utilities") pod "4325cf3e-4e72-4659-98cc-a78598a26e81" (UID: "4325cf3e-4e72-4659-98cc-a78598a26e81"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.224539 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.231387 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4325cf3e-4e72-4659-98cc-a78598a26e81-kube-api-access-z86x4" (OuterVolumeSpecName: "kube-api-access-z86x4") pod "4325cf3e-4e72-4659-98cc-a78598a26e81" (UID: "4325cf3e-4e72-4659-98cc-a78598a26e81"). InnerVolumeSpecName "kube-api-access-z86x4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.326121 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4325cf3e-4e72-4659-98cc-a78598a26e81" (UID: "4325cf3e-4e72-4659-98cc-a78598a26e81"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.343321 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z86x4\" (UniqueName: \"kubernetes.io/projected/4325cf3e-4e72-4659-98cc-a78598a26e81-kube-api-access-z86x4\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.343369 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4325cf3e-4e72-4659-98cc-a78598a26e81-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.537249 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cx9xf"] Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.565341 4800 generic.go:334] "Generic (PLEG): container finished" podID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerID="290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a" exitCode=0 Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.565503 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hfhbl" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.565482 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfhbl" event={"ID":"4325cf3e-4e72-4659-98cc-a78598a26e81","Type":"ContainerDied","Data":"290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a"} Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.565595 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfhbl" event={"ID":"4325cf3e-4e72-4659-98cc-a78598a26e81","Type":"ContainerDied","Data":"105c80ea5f71fb87da64c3a0c09e5487247819ad6f8a5609a6f1f09d560e1f50"} Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.565631 4800 scope.go:117] "RemoveContainer" containerID="290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.565665 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cx9xf" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="registry-server" containerID="cri-o://4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975" gracePeriod=2 Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.601575 4800 scope.go:117] "RemoveContainer" containerID="8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.628188 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hfhbl"] Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.634679 4800 scope.go:117] "RemoveContainer" containerID="d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.640669 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-marketplace/redhat-operators-hfhbl"] Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.809408 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" path="/var/lib/kubelet/pods/4325cf3e-4e72-4659-98cc-a78598a26e81/volumes" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.816966 4800 scope.go:117] "RemoveContainer" containerID="290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a" Nov 25 16:09:41 crc kubenswrapper[4800]: E1125 16:09:41.817514 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a\": container with ID starting with 290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a not found: ID does not exist" containerID="290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.817541 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a"} err="failed to get container status \"290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a\": rpc error: code = NotFound desc = could not find container \"290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a\": container with ID starting with 290e09613fe10fd76e12b4dce9a3c0299f0841da6b921215ee566239cbf4f24a not found: ID does not exist" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.817564 4800 scope.go:117] "RemoveContainer" containerID="8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8" Nov 25 16:09:41 crc kubenswrapper[4800]: E1125 16:09:41.818323 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8\": container with ID starting with 8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8 not found: ID does not exist" containerID="8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.818396 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8"} err="failed to get container status \"8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8\": rpc error: code = NotFound desc = could not find container \"8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8\": container with ID starting with 8ab20403a9b398eea32f201d1a51589178486d90dbf1fbdcb7f6fbbcb1f3c9e8 not found: ID does not exist" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.818437 4800 scope.go:117] "RemoveContainer" containerID="d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa" Nov 25 16:09:41 crc kubenswrapper[4800]: E1125 16:09:41.818752 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa\": container with ID starting with d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa not found: ID does not exist" containerID="d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa" Nov 25 16:09:41 crc kubenswrapper[4800]: I1125 16:09:41.818779 4800 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa"} err="failed to get container status \"d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa\": rpc error: code = NotFound desc = could not find container \"d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa\": container with ID starting with d17881fffdd0476a6b4ca1391e188af6cbc9e6db4b1f04f5f811a171c9f3eeaa not found: ID does not exist" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.044720 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.160033 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2vnt\" (UniqueName: \"kubernetes.io/projected/e11694f9-a2e1-43e2-8dbc-e1f332902156-kube-api-access-m2vnt\") pod \"e11694f9-a2e1-43e2-8dbc-e1f332902156\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.160132 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-utilities\") pod \"e11694f9-a2e1-43e2-8dbc-e1f332902156\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.160454 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-catalog-content\") pod \"e11694f9-a2e1-43e2-8dbc-e1f332902156\" (UID: \"e11694f9-a2e1-43e2-8dbc-e1f332902156\") " Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.161619 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-utilities" (OuterVolumeSpecName: "utilities") pod "e11694f9-a2e1-43e2-8dbc-e1f332902156" (UID: "e11694f9-a2e1-43e2-8dbc-e1f332902156"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.168370 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e11694f9-a2e1-43e2-8dbc-e1f332902156-kube-api-access-m2vnt" (OuterVolumeSpecName: "kube-api-access-m2vnt") pod "e11694f9-a2e1-43e2-8dbc-e1f332902156" (UID: "e11694f9-a2e1-43e2-8dbc-e1f332902156"). InnerVolumeSpecName "kube-api-access-m2vnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.223376 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e11694f9-a2e1-43e2-8dbc-e1f332902156" (UID: "e11694f9-a2e1-43e2-8dbc-e1f332902156"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.263152 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.263227 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2vnt\" (UniqueName: \"kubernetes.io/projected/e11694f9-a2e1-43e2-8dbc-e1f332902156-kube-api-access-m2vnt\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.263241 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e11694f9-a2e1-43e2-8dbc-e1f332902156-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.579698 4800 generic.go:334] "Generic (PLEG): container finished" podID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerID="4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975" exitCode=0 Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.580226 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cx9xf" event={"ID":"e11694f9-a2e1-43e2-8dbc-e1f332902156","Type":"ContainerDied","Data":"4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975"} Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.580275 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cx9xf" event={"ID":"e11694f9-a2e1-43e2-8dbc-e1f332902156","Type":"ContainerDied","Data":"3181c92eca163af946dc3e3848eea9f5f80770d815b7a5e6d933005f3debd3d3"} Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.580307 4800 scope.go:117] "RemoveContainer" containerID="4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.580567 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cx9xf" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.604334 4800 scope.go:117] "RemoveContainer" containerID="9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.628584 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cx9xf"] Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.637852 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cx9xf"] Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.651976 4800 scope.go:117] "RemoveContainer" containerID="4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.676675 4800 scope.go:117] "RemoveContainer" containerID="4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975" Nov 25 16:09:42 crc kubenswrapper[4800]: E1125 16:09:42.677216 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975\": container with ID starting with 4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975 not found: ID does not exist" containerID="4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.677253 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975"} err="failed to get container status \"4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975\": rpc error: code = NotFound desc = could not find container \"4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975\": container with ID starting with 4098e4a4ef2d1badef372ed30fb6f3b54b6653fe709c7c4a84a75290e110d975 not found: ID does not exist" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.677283 4800 scope.go:117] "RemoveContainer" containerID="9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3" Nov 25 16:09:42 crc kubenswrapper[4800]: E1125 16:09:42.677821 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3\": container with ID starting with 9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3 not found: ID does not exist" containerID="9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.677867 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3"} err="failed to get container status \"9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3\": rpc error: code = NotFound desc = could not find container \"9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3\": container with ID starting with 9ce9f676fb942b26b47eb1ab1e37befcb24561e87f7eb24392b76829bc079ec3 not found: ID does not exist" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.677888 4800 scope.go:117] "RemoveContainer" containerID="4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c" Nov 25 16:09:42 crc kubenswrapper[4800]: E1125 16:09:42.678267 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c\": container with ID starting with 4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c not found: ID does not exist" containerID="4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c" Nov 25 16:09:42 crc kubenswrapper[4800]: I1125 16:09:42.678335 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c"} err="failed to get container status \"4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c\": rpc error: code = NotFound desc = could not find container \"4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c\": container with ID starting with 4f1d05da5384aa6d5f8f55ded1dc1749a01654cf0f337ce84f76a06cc521641c not found: ID does not exist" Nov 25 16:09:43 crc kubenswrapper[4800]: I1125 16:09:43.802652 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" path="/var/lib/kubelet/pods/e11694f9-a2e1-43e2-8dbc-e1f332902156/volumes" Nov 25 16:09:51 crc kubenswrapper[4800]: I1125 16:09:51.785989 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:09:51 crc kubenswrapper[4800]: E1125 16:09:51.786808 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:10:05 crc kubenswrapper[4800]: I1125 16:10:05.785578 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:10:05 crc kubenswrapper[4800]: E1125 16:10:05.786675 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:10:17 crc kubenswrapper[4800]: I1125 16:10:17.786188 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:10:17 crc kubenswrapper[4800]: E1125 16:10:17.786953 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:10:31 crc kubenswrapper[4800]: I1125 16:10:31.786207 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:10:31 crc kubenswrapper[4800]: E1125 16:10:31.787346 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:10:45 crc kubenswrapper[4800]: I1125 16:10:45.785961 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:10:45 crc kubenswrapper[4800]: E1125 16:10:45.787143 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:10:59 crc kubenswrapper[4800]: I1125 16:10:59.798212 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:10:59 crc kubenswrapper[4800]: E1125 16:10:59.799786 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:11:02 crc kubenswrapper[4800]: I1125 16:11:02.500031 4800 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-nqpgj container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 16:11:02 crc kubenswrapper[4800]: I1125 16:11:02.500390 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nqpgj" podUID="4c143db6-2d6b-49bd-987b-a3fbacb8a562" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 16:11:12 crc kubenswrapper[4800]: I1125 16:11:12.786221 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:11:12 crc kubenswrapper[4800]: E1125 16:11:12.787338 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:11:25 crc kubenswrapper[4800]: I1125 16:11:25.786006 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:11:25 crc kubenswrapper[4800]: E1125 16:11:25.789113 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:11:36 crc kubenswrapper[4800]: I1125 16:11:36.786599 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:11:36 crc kubenswrapper[4800]: E1125 16:11:36.787617 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:11:49 crc kubenswrapper[4800]: I1125 16:11:49.800251 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:11:49 crc kubenswrapper[4800]: E1125 16:11:49.805265 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:12:02 crc kubenswrapper[4800]: I1125 16:12:02.786837 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:12:02 crc kubenswrapper[4800]: E1125 16:12:02.788105 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:12:14 crc kubenswrapper[4800]: I1125 16:12:14.786358 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:12:14 crc kubenswrapper[4800]: E1125 16:12:14.787395 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:12:25 crc kubenswrapper[4800]: I1125 16:12:25.785385 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:12:25 crc kubenswrapper[4800]: E1125 16:12:25.786523 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:12:38 crc kubenswrapper[4800]: I1125 16:12:38.785471 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:12:38 crc kubenswrapper[4800]: E1125 16:12:38.786593 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:12:39 crc kubenswrapper[4800]: I1125 16:12:39.360011 4800 generic.go:334] "Generic (PLEG): container finished" podID="a04be264-1764-4ff6-b676-688fdb0ced55" containerID="691b84c1dcaaa495d81a46dc3edbbaf9d4de58fea6396b9d2aa4dfb377ce7245" exitCode=0 Nov 25 16:12:39 crc kubenswrapper[4800]: I1125 16:12:39.360071 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" event={"ID":"a04be264-1764-4ff6-b676-688fdb0ced55","Type":"ContainerDied","Data":"691b84c1dcaaa495d81a46dc3edbbaf9d4de58fea6396b9d2aa4dfb377ce7245"} Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.808867 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.930269 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ssh-key\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.930352 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-custom-ceph-combined-ca-bundle\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931233 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-0\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931269 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-1\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931333 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-1\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931512 4800 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-ceph-nova-0\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931585 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-0\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931636 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ceph\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931664 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h47ts\" (UniqueName: \"kubernetes.io/projected/a04be264-1764-4ff6-b676-688fdb0ced55-kube-api-access-h47ts\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931738 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-nova-extra-config-0\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.931813 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-inventory\") pod \"a04be264-1764-4ff6-b676-688fdb0ced55\" (UID: \"a04be264-1764-4ff6-b676-688fdb0ced55\") " Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.939459 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.939467 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ceph" (OuterVolumeSpecName: "ceph") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.961034 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a04be264-1764-4ff6-b676-688fdb0ced55-kube-api-access-h47ts" (OuterVolumeSpecName: "kube-api-access-h47ts") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "kube-api-access-h47ts". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.963765 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.969006 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.970485 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.973022 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.977959 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "ceph-nova-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.982315 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.984828 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-inventory" (OuterVolumeSpecName: "inventory") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:40 crc kubenswrapper[4800]: I1125 16:12:40.996160 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "a04be264-1764-4ff6-b676-688fdb0ced55" (UID: "a04be264-1764-4ff6-b676-688fdb0ced55"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034026 4800 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034076 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034091 4800 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034107 4800 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034121 4800 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034129 4800 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034137 4800 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034147 4800 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034155 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a04be264-1764-4ff6-b676-688fdb0ced55-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034163 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h47ts\" (UniqueName: \"kubernetes.io/projected/a04be264-1764-4ff6-b676-688fdb0ced55-kube-api-access-h47ts\") on node \"crc\" DevicePath \"\"" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.034171 4800 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a04be264-1764-4ff6-b676-688fdb0ced55-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 
25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.385739 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" event={"ID":"a04be264-1764-4ff6-b676-688fdb0ced55","Type":"ContainerDied","Data":"a2435dc7d6c6856235995361bebab630c2b7e0cc6b6abb55bd879024dd664be6"} Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.386227 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2435dc7d6c6856235995361bebab630c2b7e0cc6b6abb55bd879024dd664be6" Nov 25 16:12:41 crc kubenswrapper[4800]: I1125 16:12:41.385869 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9" Nov 25 16:12:50 crc kubenswrapper[4800]: I1125 16:12:50.786418 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:12:50 crc kubenswrapper[4800]: E1125 16:12:50.787555 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.842422 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 16:12:56 crc kubenswrapper[4800]: E1125 16:12:56.843805 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="extract-utilities" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.843827 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="extract-utilities" Nov 25 16:12:56 crc kubenswrapper[4800]: E1125 16:12:56.843868 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="registry-server" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.843875 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="registry-server" Nov 25 16:12:56 crc kubenswrapper[4800]: E1125 16:12:56.843886 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="extract-utilities" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.843896 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="extract-utilities" Nov 25 16:12:56 crc kubenswrapper[4800]: E1125 16:12:56.843917 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="extract-content" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.843923 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="extract-content" Nov 25 16:12:56 crc kubenswrapper[4800]: E1125 16:12:56.843935 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04be264-1764-4ff6-b676-688fdb0ced55" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.843947 4800 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="a04be264-1764-4ff6-b676-688fdb0ced55" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 16:12:56 crc kubenswrapper[4800]: E1125 16:12:56.843965 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="registry-server" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.843973 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="registry-server" Nov 25 16:12:56 crc kubenswrapper[4800]: E1125 16:12:56.843996 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="extract-content" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.844004 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="extract-content" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.844255 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="4325cf3e-4e72-4659-98cc-a78598a26e81" containerName="registry-server" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.844281 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a04be264-1764-4ff6-b676-688fdb0ced55" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.844316 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="e11694f9-a2e1-43e2-8dbc-e1f332902156" containerName="registry-server" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.845554 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.848701 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.848720 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.859361 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.861893 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.872703 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.880522 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.911278 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976046 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4597m\" (UniqueName: \"kubernetes.io/projected/7be6b551-566a-410c-b8f9-892dee455826-kube-api-access-4597m\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976121 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9418bcee-6bf4-4758-9ffc-ce6945012a4e-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976264 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976310 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976388 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976428 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-scripts\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976452 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-run\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976503 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-config-data-custom\") pod \"cinder-backup-0\" (UID: 
\"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976527 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976550 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976580 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-nvme\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976601 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976686 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-lib-modules\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976715 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976764 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976786 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976908 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-sys\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 
16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.976968 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977100 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-dev\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977148 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-dev\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977172 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977250 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977283 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977324 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977422 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977444 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-config-data\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977498 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-run\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977534 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977561 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7be6b551-566a-410c-b8f9-892dee455826-ceph\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977604 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-sys\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977622 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bx78\" (UniqueName: \"kubernetes.io/projected/9418bcee-6bf4-4758-9ffc-ce6945012a4e-kube-api-access-6bx78\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:56 crc kubenswrapper[4800]: I1125 16:12:56.977697 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080174 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080281 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080320 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080366 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-sys\") pod \"cinder-volume-volume1-0\" (UID: 
\"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080409 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080480 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-dev\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080486 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080522 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-dev\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080486 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-sys\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080570 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-dev\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080612 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-dev\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080674 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080762 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080787 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-lib-cinder\") pod 
\"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080788 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.080971 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081545 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081636 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081658 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-config-data\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081706 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-run\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081735 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081762 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7be6b551-566a-410c-b8f9-892dee455826-ceph\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081783 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-sys\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081805 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bx78\" (UniqueName: 
\"kubernetes.io/projected/9418bcee-6bf4-4758-9ffc-ce6945012a4e-kube-api-access-6bx78\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081865 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081898 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4597m\" (UniqueName: \"kubernetes.io/projected/7be6b551-566a-410c-b8f9-892dee455826-kube-api-access-4597m\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081905 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-run\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081923 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9418bcee-6bf4-4758-9ffc-ce6945012a4e-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081939 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.081985 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082023 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082058 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082096 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-scripts\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 
16:12:57.082116 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-run\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082150 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-config-data-custom\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082169 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082185 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082208 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-nvme\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082227 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082320 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-lib-modules\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082439 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082463 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-lib-modules\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.082510 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " 
pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.086765 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-sys\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.087336 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.087395 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.087961 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.088011 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.088040 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-run\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.088475 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7be6b551-566a-410c-b8f9-892dee455826-etc-nvme\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.088552 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.088674 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9418bcee-6bf4-4758-9ffc-ce6945012a4e-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.089745 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: 
\"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.090447 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9418bcee-6bf4-4758-9ffc-ce6945012a4e-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.091786 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.095662 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-scripts\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.095894 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.097043 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.097100 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9418bcee-6bf4-4758-9ffc-ce6945012a4e-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.102538 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7be6b551-566a-410c-b8f9-892dee455826-ceph\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.103820 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-config-data\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.108694 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4597m\" (UniqueName: \"kubernetes.io/projected/7be6b551-566a-410c-b8f9-892dee455826-kube-api-access-4597m\") pod \"cinder-backup-0\" (UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.109224 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7be6b551-566a-410c-b8f9-892dee455826-config-data-custom\") pod \"cinder-backup-0\" 
(UID: \"7be6b551-566a-410c-b8f9-892dee455826\") " pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.112586 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bx78\" (UniqueName: \"kubernetes.io/projected/9418bcee-6bf4-4758-9ffc-ce6945012a4e-kube-api-access-6bx78\") pod \"cinder-volume-volume1-0\" (UID: \"9418bcee-6bf4-4758-9ffc-ce6945012a4e\") " pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.168440 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.198117 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.475525 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-z5lr8"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.477487 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.517055 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-z5lr8"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.590245 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-b395-account-create-4xzz6"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.591936 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.592456 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f552120-84af-44ca-a39f-cd3e5f895ed5-operator-scripts\") pod \"manila-db-create-z5lr8\" (UID: \"7f552120-84af-44ca-a39f-cd3e5f895ed5\") " pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.592760 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kxhd\" (UniqueName: \"kubernetes.io/projected/7f552120-84af-44ca-a39f-cd3e5f895ed5-kube-api-access-2kxhd\") pod \"manila-db-create-z5lr8\" (UID: \"7f552120-84af-44ca-a39f-cd3e5f895ed5\") " pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.596635 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.660802 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-b395-account-create-4xzz6"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.696924 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl7qb\" (UniqueName: \"kubernetes.io/projected/b995efb0-426b-470e-a79c-9be50ad73e64-kube-api-access-jl7qb\") pod \"manila-b395-account-create-4xzz6\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") " pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.698272 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f552120-84af-44ca-a39f-cd3e5f895ed5-operator-scripts\") pod \"manila-db-create-z5lr8\" (UID: 
\"7f552120-84af-44ca-a39f-cd3e5f895ed5\") " pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.698514 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kxhd\" (UniqueName: \"kubernetes.io/projected/7f552120-84af-44ca-a39f-cd3e5f895ed5-kube-api-access-2kxhd\") pod \"manila-db-create-z5lr8\" (UID: \"7f552120-84af-44ca-a39f-cd3e5f895ed5\") " pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.698717 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b995efb0-426b-470e-a79c-9be50ad73e64-operator-scripts\") pod \"manila-b395-account-create-4xzz6\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") " pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.700263 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f552120-84af-44ca-a39f-cd3e5f895ed5-operator-scripts\") pod \"manila-db-create-z5lr8\" (UID: \"7f552120-84af-44ca-a39f-cd3e5f895ed5\") " pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.709122 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.711664 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.715257 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.716001 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.716202 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.716470 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-58txk" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.727322 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.757983 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kxhd\" (UniqueName: \"kubernetes.io/projected/7f552120-84af-44ca-a39f-cd3e5f895ed5-kube-api-access-2kxhd\") pod \"manila-db-create-z5lr8\" (UID: \"7f552120-84af-44ca-a39f-cd3e5f895ed5\") " pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.775052 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.777478 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.783177 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.783478 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.801179 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.801490 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxk9x\" (UniqueName: \"kubernetes.io/projected/1771d537-769e-4578-9722-3131ffc1f447-kube-api-access-hxk9x\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.801682 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.801806 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-config-data\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.801961 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.802085 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1771d537-769e-4578-9722-3131ffc1f447-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.802196 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1771d537-769e-4578-9722-3131ffc1f447-ceph\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.802359 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b995efb0-426b-470e-a79c-9be50ad73e64-operator-scripts\") pod 
\"manila-b395-account-create-4xzz6\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") " pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.802445 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1771d537-769e-4578-9722-3131ffc1f447-logs\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.802546 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-scripts\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.802666 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl7qb\" (UniqueName: \"kubernetes.io/projected/b995efb0-426b-470e-a79c-9be50ad73e64-kube-api-access-jl7qb\") pod \"manila-b395-account-create-4xzz6\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") " pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.803703 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b995efb0-426b-470e-a79c-9be50ad73e64-operator-scripts\") pod \"manila-b395-account-create-4xzz6\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") " pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.821744 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl7qb\" (UniqueName: \"kubernetes.io/projected/b995efb0-426b-470e-a79c-9be50ad73e64-kube-api-access-jl7qb\") pod \"manila-b395-account-create-4xzz6\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") " pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.825677 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.841733 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-z5lr8" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.904535 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89177b29-2825-42dd-9746-b1b33ed4e205-ceph\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.905184 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89177b29-2825-42dd-9746-b1b33ed4e205-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.905238 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.905294 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-scripts\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.905784 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-config-data\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.905895 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.905972 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxk9x\" (UniqueName: \"kubernetes.io/projected/1771d537-769e-4578-9722-3131ffc1f447-kube-api-access-hxk9x\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.907648 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdvwq\" (UniqueName: \"kubernetes.io/projected/89177b29-2825-42dd-9746-b1b33ed4e205-kube-api-access-qdvwq\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.907711 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-combined-ca-bundle\") pod \"glance-default-external-api-0\" 
(UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.907773 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-config-data\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.907862 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89177b29-2825-42dd-9746-b1b33ed4e205-logs\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.907908 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.907937 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.907966 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.908020 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1771d537-769e-4578-9722-3131ffc1f447-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.908049 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1771d537-769e-4578-9722-3131ffc1f447-ceph\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.908136 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.908271 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1771d537-769e-4578-9722-3131ffc1f447-logs\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " 
pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.908341 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-scripts\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.910965 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1771d537-769e-4578-9722-3131ffc1f447-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.911693 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1771d537-769e-4578-9722-3131ffc1f447-logs\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.919506 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-scripts\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.926144 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-b395-account-create-4xzz6" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.932126 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1771d537-769e-4578-9722-3131ffc1f447-ceph\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.932408 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.936576 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-config-data\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.938070 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1771d537-769e-4578-9722-3131ffc1f447-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.954447 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxk9x\" (UniqueName: \"kubernetes.io/projected/1771d537-769e-4578-9722-3131ffc1f447-kube-api-access-hxk9x\") pod \"glance-default-external-api-0\" (UID: 
\"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:57 crc kubenswrapper[4800]: I1125 16:12:57.981277 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1771d537-769e-4578-9722-3131ffc1f447\") " pod="openstack/glance-default-external-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.010388 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.012789 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89177b29-2825-42dd-9746-b1b33ed4e205-ceph\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.012893 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89177b29-2825-42dd-9746-b1b33ed4e205-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.012955 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-scripts\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.012986 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-config-data\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.013051 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdvwq\" (UniqueName: \"kubernetes.io/projected/89177b29-2825-42dd-9746-b1b33ed4e205-kube-api-access-qdvwq\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.013133 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89177b29-2825-42dd-9746-b1b33ed4e205-logs\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.013155 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.013178 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.013273 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.013551 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.015072 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89177b29-2825-42dd-9746-b1b33ed4e205-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.016520 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89177b29-2825-42dd-9746-b1b33ed4e205-logs\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.021920 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-config-data\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.022044 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-scripts\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.024470 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89177b29-2825-42dd-9746-b1b33ed4e205-ceph\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.027159 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.032673 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89177b29-2825-42dd-9746-b1b33ed4e205-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc 
kubenswrapper[4800]: I1125 16:12:58.055536 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.062101 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdvwq\" (UniqueName: \"kubernetes.io/projected/89177b29-2825-42dd-9746-b1b33ed4e205-kube-api-access-qdvwq\") pod \"glance-default-internal-api-0\" (UID: \"89177b29-2825-42dd-9746-b1b33ed4e205\") " pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.068932 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.106142 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.110532 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.394031 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-z5lr8"] Nov 25 16:12:58 crc kubenswrapper[4800]: W1125 16:12:58.399471 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f552120_84af_44ca_a39f_cd3e5f895ed5.slice/crio-d9667b9ef012b226603c32b23da8530390e4d118faaf1f6bcf0c0d8be2139d18 WatchSource:0}: Error finding container d9667b9ef012b226603c32b23da8530390e4d118faaf1f6bcf0c0d8be2139d18: Status 404 returned error can't find the container with id d9667b9ef012b226603c32b23da8530390e4d118faaf1f6bcf0c0d8be2139d18 Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.587669 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-b395-account-create-4xzz6"] Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.588563 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-z5lr8" event={"ID":"7f552120-84af-44ca-a39f-cd3e5f895ed5","Type":"ContainerStarted","Data":"d9667b9ef012b226603c32b23da8530390e4d118faaf1f6bcf0c0d8be2139d18"} Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.590355 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"9418bcee-6bf4-4758-9ffc-ce6945012a4e","Type":"ContainerStarted","Data":"0d46c5fcf4f14caaeb15f87f7558a5f779dfd35dcc40271381a3f600eb8ee419"} Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.592232 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7be6b551-566a-410c-b8f9-892dee455826","Type":"ContainerStarted","Data":"cefef9cda83e0402dd0973c5ff7572b405049be22aa00cd78f53cc4872bcc974"} Nov 25 16:12:58 crc kubenswrapper[4800]: W1125 16:12:58.599399 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb995efb0_426b_470e_a79c_9be50ad73e64.slice/crio-29a727e07e33299be69547ad90b1ce9950c2263567715b42918ad22f951a191e WatchSource:0}: Error finding container 29a727e07e33299be69547ad90b1ce9950c2263567715b42918ad22f951a191e: Status 404 returned error can't find the container with id 
29a727e07e33299be69547ad90b1ce9950c2263567715b42918ad22f951a191e
Nov 25 16:12:58 crc kubenswrapper[4800]: W1125 16:12:58.798590 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1771d537_769e_4578_9722_3131ffc1f447.slice/crio-9550c64d921c69decf06b1a641556fb561f0e7b65ca6033b014c973417ed4f3a WatchSource:0}: Error finding container 9550c64d921c69decf06b1a641556fb561f0e7b65ca6033b014c973417ed4f3a: Status 404 returned error can't find the container with id 9550c64d921c69decf06b1a641556fb561f0e7b65ca6033b014c973417ed4f3a
Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.801733 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 16:12:58 crc kubenswrapper[4800]: I1125 16:12:58.931018 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 16:12:59 crc kubenswrapper[4800]: I1125 16:12:59.613869 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"89177b29-2825-42dd-9746-b1b33ed4e205","Type":"ContainerStarted","Data":"d8a0c53e073639898bda03613d363945268744f6f27ab85db375611630d24950"}
Nov 25 16:12:59 crc kubenswrapper[4800]: I1125 16:12:59.618514 4800 generic.go:334] "Generic (PLEG): container finished" podID="7f552120-84af-44ca-a39f-cd3e5f895ed5" containerID="cd1b5681aabde16c1b4e9431e0f1aa4cd99452c70abd7f2895e63710edba1854" exitCode=0
Nov 25 16:12:59 crc kubenswrapper[4800]: I1125 16:12:59.618599 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-z5lr8" event={"ID":"7f552120-84af-44ca-a39f-cd3e5f895ed5","Type":"ContainerDied","Data":"cd1b5681aabde16c1b4e9431e0f1aa4cd99452c70abd7f2895e63710edba1854"}
Nov 25 16:12:59 crc kubenswrapper[4800]: I1125 16:12:59.621979 4800 generic.go:334] "Generic (PLEG): container finished" podID="b995efb0-426b-470e-a79c-9be50ad73e64" containerID="75a007bc3f45e2d68a66625de6b8f388c02d9d24d7b0aaa7f3c9eaae61d17620" exitCode=0
Nov 25 16:12:59 crc kubenswrapper[4800]: I1125 16:12:59.622114 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-b395-account-create-4xzz6" event={"ID":"b995efb0-426b-470e-a79c-9be50ad73e64","Type":"ContainerDied","Data":"75a007bc3f45e2d68a66625de6b8f388c02d9d24d7b0aaa7f3c9eaae61d17620"}
Nov 25 16:12:59 crc kubenswrapper[4800]: I1125 16:12:59.622916 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-b395-account-create-4xzz6" event={"ID":"b995efb0-426b-470e-a79c-9be50ad73e64","Type":"ContainerStarted","Data":"29a727e07e33299be69547ad90b1ce9950c2263567715b42918ad22f951a191e"}
Nov 25 16:12:59 crc kubenswrapper[4800]: I1125 16:12:59.626723 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1771d537-769e-4578-9722-3131ffc1f447","Type":"ContainerStarted","Data":"9550c64d921c69decf06b1a641556fb561f0e7b65ca6033b014c973417ed4f3a"}
Nov 25 16:13:00 crc kubenswrapper[4800]: I1125 16:13:00.649973 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"9418bcee-6bf4-4758-9ffc-ce6945012a4e","Type":"ContainerStarted","Data":"0e935e66d69d1715596e2d038e76e96c6077c7bc42c42c294f37343c78fc780d"}
Nov 25 16:13:00 crc kubenswrapper[4800]: I1125 16:13:00.655643 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1771d537-769e-4578-9722-3131ffc1f447","Type":"ContainerStarted","Data":"4cc5447f4a4006853d2c3f472da44847d6a8fb846d20c8cc0fb7ecd6429b36e6"}
Nov 25 16:13:00 crc kubenswrapper[4800]: I1125 16:13:00.660412 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7be6b551-566a-410c-b8f9-892dee455826","Type":"ContainerStarted","Data":"f6fbeaf84b4c790ea2be790e706f742380bf11b4ccbcdfb0e252a249e4e8d203"}
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.029876 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-z5lr8"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.206204 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-b395-account-create-4xzz6"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.219180 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kxhd\" (UniqueName: \"kubernetes.io/projected/7f552120-84af-44ca-a39f-cd3e5f895ed5-kube-api-access-2kxhd\") pod \"7f552120-84af-44ca-a39f-cd3e5f895ed5\" (UID: \"7f552120-84af-44ca-a39f-cd3e5f895ed5\") "
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.219501 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f552120-84af-44ca-a39f-cd3e5f895ed5-operator-scripts\") pod \"7f552120-84af-44ca-a39f-cd3e5f895ed5\" (UID: \"7f552120-84af-44ca-a39f-cd3e5f895ed5\") "
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.220544 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f552120-84af-44ca-a39f-cd3e5f895ed5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f552120-84af-44ca-a39f-cd3e5f895ed5" (UID: "7f552120-84af-44ca-a39f-cd3e5f895ed5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.220750 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f552120-84af-44ca-a39f-cd3e5f895ed5-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.234830 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f552120-84af-44ca-a39f-cd3e5f895ed5-kube-api-access-2kxhd" (OuterVolumeSpecName: "kube-api-access-2kxhd") pod "7f552120-84af-44ca-a39f-cd3e5f895ed5" (UID: "7f552120-84af-44ca-a39f-cd3e5f895ed5"). InnerVolumeSpecName "kube-api-access-2kxhd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.323132 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b995efb0-426b-470e-a79c-9be50ad73e64-operator-scripts\") pod \"b995efb0-426b-470e-a79c-9be50ad73e64\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") "
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.323227 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jl7qb\" (UniqueName: \"kubernetes.io/projected/b995efb0-426b-470e-a79c-9be50ad73e64-kube-api-access-jl7qb\") pod \"b995efb0-426b-470e-a79c-9be50ad73e64\" (UID: \"b995efb0-426b-470e-a79c-9be50ad73e64\") "
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.324397 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kxhd\" (UniqueName: \"kubernetes.io/projected/7f552120-84af-44ca-a39f-cd3e5f895ed5-kube-api-access-2kxhd\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.324795 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b995efb0-426b-470e-a79c-9be50ad73e64-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b995efb0-426b-470e-a79c-9be50ad73e64" (UID: "b995efb0-426b-470e-a79c-9be50ad73e64"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.342182 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b995efb0-426b-470e-a79c-9be50ad73e64-kube-api-access-jl7qb" (OuterVolumeSpecName: "kube-api-access-jl7qb") pod "b995efb0-426b-470e-a79c-9be50ad73e64" (UID: "b995efb0-426b-470e-a79c-9be50ad73e64"). InnerVolumeSpecName "kube-api-access-jl7qb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.428951 4800 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b995efb0-426b-470e-a79c-9be50ad73e64-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.429236 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jl7qb\" (UniqueName: \"kubernetes.io/projected/b995efb0-426b-470e-a79c-9be50ad73e64-kube-api-access-jl7qb\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.674276 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1771d537-769e-4578-9722-3131ffc1f447","Type":"ContainerStarted","Data":"4382cf8f618073669e89eccc47171652ebfe170b3ff7564c2cb553bc428d42c3"}
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.680945 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7be6b551-566a-410c-b8f9-892dee455826","Type":"ContainerStarted","Data":"4ae4a682c0b3c0e32f771507e60bd4a0c25aec2444427ab2cc371f93a62f5d24"}
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.684523 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"89177b29-2825-42dd-9746-b1b33ed4e205","Type":"ContainerStarted","Data":"ecf98cb7dbb50e10ebbc9fd60d32882c18e07abf4eae46af3f355d76fa64e9a9"}
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.685649 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-z5lr8" event={"ID":"7f552120-84af-44ca-a39f-cd3e5f895ed5","Type":"ContainerDied","Data":"d9667b9ef012b226603c32b23da8530390e4d118faaf1f6bcf0c0d8be2139d18"}
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.685756 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9667b9ef012b226603c32b23da8530390e4d118faaf1f6bcf0c0d8be2139d18"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.685896 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-z5lr8"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.687974 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"9418bcee-6bf4-4758-9ffc-ce6945012a4e","Type":"ContainerStarted","Data":"e7b4253b7b38105605e451dac67f4c3d060ef79d472f6b09540ea3e3d31cf119"}
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.690457 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-b395-account-create-4xzz6" event={"ID":"b995efb0-426b-470e-a79c-9be50ad73e64","Type":"ContainerDied","Data":"29a727e07e33299be69547ad90b1ce9950c2263567715b42918ad22f951a191e"}
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.690513 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29a727e07e33299be69547ad90b1ce9950c2263567715b42918ad22f951a191e"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.690573 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-b395-account-create-4xzz6"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.718237 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.718207168 podStartE2EDuration="5.718207168s" podCreationTimestamp="2025-11-25 16:12:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:13:01.699187212 +0000 UTC m=+3342.753595694" watchObservedRunningTime="2025-11-25 16:13:01.718207168 +0000 UTC m=+3342.772615650"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.740671 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=4.061786543 podStartE2EDuration="5.740640097s" podCreationTimestamp="2025-11-25 16:12:56 +0000 UTC" firstStartedPulling="2025-11-25 16:12:58.120051418 +0000 UTC m=+3339.174459900" lastFinishedPulling="2025-11-25 16:12:59.798904982 +0000 UTC m=+3340.853313454" observedRunningTime="2025-11-25 16:13:01.738121009 +0000 UTC m=+3342.792529511" watchObservedRunningTime="2025-11-25 16:13:01.740640097 +0000 UTC m=+3342.795048599"
Nov 25 16:13:01 crc kubenswrapper[4800]: I1125 16:13:01.798192 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=4.019642309 podStartE2EDuration="5.79816777s" podCreationTimestamp="2025-11-25 16:12:56 +0000 UTC" firstStartedPulling="2025-11-25 16:12:58.017359609 +0000 UTC m=+3339.071768091" lastFinishedPulling="2025-11-25 16:12:59.79588507 +0000 UTC m=+3340.850293552" observedRunningTime="2025-11-25 16:13:01.783917793 +0000 UTC m=+3342.838326275" watchObservedRunningTime="2025-11-25 16:13:01.79816777 +0000 UTC m=+3342.852576252"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.168639 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.198146 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.702472 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"89177b29-2825-42dd-9746-b1b33ed4e205","Type":"ContainerStarted","Data":"48d4f4ae9582ec41c77333a060a571c4bd36cd36c02d9a11c40e81ff1016fd12"}
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.729716 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.729690968 podStartE2EDuration="6.729690968s" podCreationTimestamp="2025-11-25 16:12:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:13:02.727491509 +0000 UTC m=+3343.781900031" watchObservedRunningTime="2025-11-25 16:13:02.729690968 +0000 UTC m=+3343.784099460"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.931806 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-64scj"]
Nov 25 16:13:02 crc kubenswrapper[4800]: E1125 16:13:02.932802 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f552120-84af-44ca-a39f-cd3e5f895ed5" containerName="mariadb-database-create"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.932825 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f552120-84af-44ca-a39f-cd3e5f895ed5" containerName="mariadb-database-create"
Nov 25 16:13:02 crc kubenswrapper[4800]: E1125 16:13:02.932863 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b995efb0-426b-470e-a79c-9be50ad73e64" containerName="mariadb-account-create"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.932876 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b995efb0-426b-470e-a79c-9be50ad73e64" containerName="mariadb-account-create"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.933108 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b995efb0-426b-470e-a79c-9be50ad73e64" containerName="mariadb-account-create"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.933131 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f552120-84af-44ca-a39f-cd3e5f895ed5" containerName="mariadb-database-create"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.934542 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.939446 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.943243 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-w8nsw"
Nov 25 16:13:02 crc kubenswrapper[4800]: I1125 16:13:02.949422 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-64scj"]
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.073274 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzbs5\" (UniqueName: \"kubernetes.io/projected/817a76e7-8681-4597-b14d-d404ad3a2801-kube-api-access-wzbs5\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.073341 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-config-data\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.073378 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-job-config-data\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.073392 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-combined-ca-bundle\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.176193 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzbs5\" (UniqueName: \"kubernetes.io/projected/817a76e7-8681-4597-b14d-d404ad3a2801-kube-api-access-wzbs5\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.176285 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-config-data\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.176327 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-job-config-data\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.176345 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-combined-ca-bundle\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.200144 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-combined-ca-bundle\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.233420 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzbs5\" (UniqueName: \"kubernetes.io/projected/817a76e7-8681-4597-b14d-d404ad3a2801-kube-api-access-wzbs5\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.234467 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-config-data\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.234708 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-job-config-data\") pod \"manila-db-sync-64scj\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") " pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.303757 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.786000 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e"
Nov 25 16:13:03 crc kubenswrapper[4800]: E1125 16:13:03.786657 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:13:03 crc kubenswrapper[4800]: I1125 16:13:03.900577 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-64scj"]
Nov 25 16:13:04 crc kubenswrapper[4800]: I1125 16:13:04.723750 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-64scj" event={"ID":"817a76e7-8681-4597-b14d-d404ad3a2801","Type":"ContainerStarted","Data":"75f1dfadb0f349fdd85917add40fa7aa42a8560ec33e27535ccbd4810777e556"}
Nov 25 16:13:07 crc kubenswrapper[4800]: I1125 16:13:07.485213 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Nov 25 16:13:07 crc kubenswrapper[4800]: I1125 16:13:07.510343 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.070202 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.070668 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.112390 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.112439 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.126371 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.128593 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.167151 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.177238 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.773280 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.773323 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.773334 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:08 crc kubenswrapper[4800]: I1125 16:13:08.773348 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:11 crc kubenswrapper[4800]: I1125 16:13:11.062824 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:11 crc kubenswrapper[4800]: I1125 16:13:11.063718 4800 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 16:13:11 crc kubenswrapper[4800]: I1125 16:13:11.072988 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 25 16:13:11 crc kubenswrapper[4800]: I1125 16:13:11.283795 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:11 crc kubenswrapper[4800]: I1125 16:13:11.283965 4800 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 16:13:11 crc kubenswrapper[4800]: I1125 16:13:11.316630 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 16:13:13 crc kubenswrapper[4800]: I1125 16:13:13.844913 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-64scj" event={"ID":"817a76e7-8681-4597-b14d-d404ad3a2801","Type":"ContainerStarted","Data":"6100b07b51b3f845eb7751ba052dc017336d470eeb874b6fb0c0b6b031e01163"}
Nov 25 16:13:13 crc kubenswrapper[4800]: I1125 16:13:13.887306 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-64scj" podStartSLOduration=3.137295625 podStartE2EDuration="11.88727187s" podCreationTimestamp="2025-11-25 16:13:02 +0000 UTC" firstStartedPulling="2025-11-25 16:13:03.905327017 +0000 UTC m=+3344.959735499" lastFinishedPulling="2025-11-25 16:13:12.655303252 +0000 UTC m=+3353.709711744" observedRunningTime="2025-11-25 16:13:13.869810016 +0000 UTC m=+3354.924218498" watchObservedRunningTime="2025-11-25 16:13:13.88727187 +0000 UTC m=+3354.941680352"
Nov 25 16:13:18 crc kubenswrapper[4800]: I1125 16:13:18.789746 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e"
Nov 25 16:13:18 crc kubenswrapper[4800]: E1125 16:13:18.790777 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:13:33 crc kubenswrapper[4800]: I1125 16:13:33.055260 4800 generic.go:334] "Generic (PLEG): container finished" podID="817a76e7-8681-4597-b14d-d404ad3a2801" containerID="6100b07b51b3f845eb7751ba052dc017336d470eeb874b6fb0c0b6b031e01163" exitCode=0
Nov 25 16:13:33 crc kubenswrapper[4800]: I1125 16:13:33.055374 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-64scj" event={"ID":"817a76e7-8681-4597-b14d-d404ad3a2801","Type":"ContainerDied","Data":"6100b07b51b3f845eb7751ba052dc017336d470eeb874b6fb0c0b6b031e01163"}
Nov 25 16:13:33 crc kubenswrapper[4800]: I1125 16:13:33.787869 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e"
Nov 25 16:13:33 crc kubenswrapper[4800]: E1125 16:13:33.788197 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.506431 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.648616 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-config-data\") pod \"817a76e7-8681-4597-b14d-d404ad3a2801\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") "
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.648685 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-job-config-data\") pod \"817a76e7-8681-4597-b14d-d404ad3a2801\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") "
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.648784 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzbs5\" (UniqueName: \"kubernetes.io/projected/817a76e7-8681-4597-b14d-d404ad3a2801-kube-api-access-wzbs5\") pod \"817a76e7-8681-4597-b14d-d404ad3a2801\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") "
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.648912 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-combined-ca-bundle\") pod \"817a76e7-8681-4597-b14d-d404ad3a2801\" (UID: \"817a76e7-8681-4597-b14d-d404ad3a2801\") "
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.656418 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "817a76e7-8681-4597-b14d-d404ad3a2801" (UID: "817a76e7-8681-4597-b14d-d404ad3a2801"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.660201 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/817a76e7-8681-4597-b14d-d404ad3a2801-kube-api-access-wzbs5" (OuterVolumeSpecName: "kube-api-access-wzbs5") pod "817a76e7-8681-4597-b14d-d404ad3a2801" (UID: "817a76e7-8681-4597-b14d-d404ad3a2801"). InnerVolumeSpecName "kube-api-access-wzbs5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.663435 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-config-data" (OuterVolumeSpecName: "config-data") pod "817a76e7-8681-4597-b14d-d404ad3a2801" (UID: "817a76e7-8681-4597-b14d-d404ad3a2801"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.687731 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "817a76e7-8681-4597-b14d-d404ad3a2801" (UID: "817a76e7-8681-4597-b14d-d404ad3a2801"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.752602 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.752656 4800 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-job-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.752673 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzbs5\" (UniqueName: \"kubernetes.io/projected/817a76e7-8681-4597-b14d-d404ad3a2801-kube-api-access-wzbs5\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:34 crc kubenswrapper[4800]: I1125 16:13:34.752690 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817a76e7-8681-4597-b14d-d404ad3a2801-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.096158 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-64scj" event={"ID":"817a76e7-8681-4597-b14d-d404ad3a2801","Type":"ContainerDied","Data":"75f1dfadb0f349fdd85917add40fa7aa42a8560ec33e27535ccbd4810777e556"}
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.096217 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-64scj"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.096245 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75f1dfadb0f349fdd85917add40fa7aa42a8560ec33e27535ccbd4810777e556"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.443497 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"]
Nov 25 16:13:35 crc kubenswrapper[4800]: E1125 16:13:35.444009 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817a76e7-8681-4597-b14d-d404ad3a2801" containerName="manila-db-sync"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.444024 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="817a76e7-8681-4597-b14d-d404ad3a2801" containerName="manila-db-sync"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.444254 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="817a76e7-8681-4597-b14d-d404ad3a2801" containerName="manila-db-sync"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.446525 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.451413 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.455140 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.455457 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-w8nsw"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.455649 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.468636 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-scripts\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.468739 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dd43996-4598-45b4-814c-2a64576c4a8b-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.468786 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.468888 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.468947 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.468965 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bdkq\" (UniqueName: \"kubernetes.io/projected/3dd43996-4598-45b4-814c-2a64576c4a8b-kube-api-access-6bdkq\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.498067 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.533400 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"]
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.535402 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.538823 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571417 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571492 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bdkq\" (UniqueName: \"kubernetes.io/projected/3dd43996-4598-45b4-814c-2a64576c4a8b-kube-api-access-6bdkq\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571587 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571621 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-ceph\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571667 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571701 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571801 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-scripts\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571833 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dd43996-4598-45b4-814c-2a64576c4a8b-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571901 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.571979 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wzcw\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-kube-api-access-6wzcw\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.572055 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.572114 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.572149 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-scripts\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.572200 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.577017 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dd43996-4598-45b4-814c-2a64576c4a8b-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.583090 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-scripts\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.584822 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.586423 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.591071 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.626319 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"]
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.640574 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bdkq\" (UniqueName: \"kubernetes.io/projected/3dd43996-4598-45b4-814c-2a64576c4a8b-kube-api-access-6bdkq\") pod \"manila-scheduler-0\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") " pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.670946 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78f48d6b7c-6g2g2"]
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.673562 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674215 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wzcw\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-kube-api-access-6wzcw\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674280 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674313 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-scripts\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674354 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674394 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-ceph\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674409 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674430 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674455 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.674588 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.688304 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.689140 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.689408 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.695497 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78f48d6b7c-6g2g2"]
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.698863 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.704233 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wzcw\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-kube-api-access-6wzcw\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.706888 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-ceph\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.725692 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-scripts\") pod \"manila-share-share1-0\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.776615 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-dns-svc\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.777055 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-ovsdbserver-nb\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.777113 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-ovsdbserver-sb\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.777169 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-config\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.777308 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-openstack-edpm-ipam\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.777438 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kmlt\" (UniqueName: \"kubernetes.io/projected/9f91611c-ae1b-460f-a9f2-a44e2cae6143-kube-api-access-9kmlt\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.787993 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.857741 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.860937 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"]
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.862984 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.866286 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880568 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8fhc\" (UniqueName: \"kubernetes.io/projected/329cfaaf-7a4a-428f-baae-618b0e388b00-kube-api-access-m8fhc\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880649 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/329cfaaf-7a4a-428f-baae-618b0e388b00-logs\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880687 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/329cfaaf-7a4a-428f-baae-618b0e388b00-etc-machine-id\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880715 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-ovsdbserver-nb\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880751 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-scripts\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880799 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-ovsdbserver-sb\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880855 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.880883 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.881109 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-config\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.881182 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-openstack-edpm-ipam\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.881351 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kmlt\" (UniqueName: \"kubernetes.io/projected/9f91611c-ae1b-460f-a9f2-a44e2cae6143-kube-api-access-9kmlt\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.881499 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data-custom\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.881658 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-dns-svc\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.885685 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-openstack-edpm-ipam\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.886582 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-ovsdbserver-sb\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.886608 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-dns-svc\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.886858 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-config\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.887163 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f91611c-ae1b-460f-a9f2-a44e2cae6143-ovsdbserver-nb\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.890818 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.916678 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kmlt\" (UniqueName: \"kubernetes.io/projected/9f91611c-ae1b-460f-a9f2-a44e2cae6143-kube-api-access-9kmlt\") pod \"dnsmasq-dns-78f48d6b7c-6g2g2\" (UID: \"9f91611c-ae1b-460f-a9f2-a44e2cae6143\") " pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.984427 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8fhc\" (UniqueName: \"kubernetes.io/projected/329cfaaf-7a4a-428f-baae-618b0e388b00-kube-api-access-m8fhc\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.984486 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/329cfaaf-7a4a-428f-baae-618b0e388b00-logs\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.984526 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/329cfaaf-7a4a-428f-baae-618b0e388b00-etc-machine-id\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.984585 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-scripts\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.984649 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.984669 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.984784 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data-custom\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.987315 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/329cfaaf-7a4a-428f-baae-618b0e388b00-logs\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:35 crc kubenswrapper[4800]: I1125 16:13:35.988000 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/329cfaaf-7a4a-428f-baae-618b0e388b00-etc-machine-id\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.001738 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-scripts\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.001826 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.002450 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.006858 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data-custom\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.010883 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8fhc\" (UniqueName: \"kubernetes.io/projected/329cfaaf-7a4a-428f-baae-618b0e388b00-kube-api-access-m8fhc\") pod \"manila-api-0\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " pod="openstack/manila-api-0"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.185215 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.228957 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.439757 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.681328 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"]
Nov 25 16:13:36 crc kubenswrapper[4800]: W1125 16:13:36.707129 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod988ef021_f985_428e_a4e2_1ef4cbeb4438.slice/crio-1a3af295c98a71fa641ae7ce640c8d0ff6cd050538b662274d561a4bd34c0b7c WatchSource:0}: Error finding container 1a3af295c98a71fa641ae7ce640c8d0ff6cd050538b662274d561a4bd34c0b7c: Status 404 returned error can't find the container with id 1a3af295c98a71fa641ae7ce640c8d0ff6cd050538b662274d561a4bd34c0b7c
Nov 25 16:13:36 crc kubenswrapper[4800]: I1125 16:13:36.782662 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78f48d6b7c-6g2g2"]
Nov 25 16:13:37 crc kubenswrapper[4800]: I1125 16:13:37.017642 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 25 16:13:37 crc kubenswrapper[4800]: W1125 16:13:37.021283 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod329cfaaf_7a4a_428f_baae_618b0e388b00.slice/crio-fa5fe77f953ae541d136cadb0fd25348df858fea5a649ef73a880b3076789ac1 WatchSource:0}: Error finding container fa5fe77f953ae541d136cadb0fd25348df858fea5a649ef73a880b3076789ac1: Status 404 returned error can't find the container with id fa5fe77f953ae541d136cadb0fd25348df858fea5a649ef73a880b3076789ac1
Nov 25 16:13:37 crc kubenswrapper[4800]: I1125 16:13:37.146738 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"329cfaaf-7a4a-428f-baae-618b0e388b00","Type":"ContainerStarted","Data":"fa5fe77f953ae541d136cadb0fd25348df858fea5a649ef73a880b3076789ac1"}
Nov 25 16:13:37 crc kubenswrapper[4800]: I1125 16:13:37.149869 4800 generic.go:334] "Generic (PLEG): container finished" podID="9f91611c-ae1b-460f-a9f2-a44e2cae6143" containerID="74f234445215d0736f83c82d5cbb3a61a451d8e8ccc47d9721010f7af7460689" exitCode=0
Nov 25 16:13:37 crc kubenswrapper[4800]: I1125 16:13:37.149927 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2" event={"ID":"9f91611c-ae1b-460f-a9f2-a44e2cae6143","Type":"ContainerDied","Data":"74f234445215d0736f83c82d5cbb3a61a451d8e8ccc47d9721010f7af7460689"}
Nov 25 16:13:37 crc kubenswrapper[4800]: I1125 16:13:37.149952 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2" event={"ID":"9f91611c-ae1b-460f-a9f2-a44e2cae6143","Type":"ContainerStarted","Data":"cbc46360cda2664bda8ff156e960fc56d5aac70bfcf8776c5781ddb3fff4c42a"}
Nov 25 16:13:37 crc kubenswrapper[4800]: I1125 16:13:37.159339 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"988ef021-f985-428e-a4e2-1ef4cbeb4438","Type":"ContainerStarted","Data":"1a3af295c98a71fa641ae7ce640c8d0ff6cd050538b662274d561a4bd34c0b7c"}
Nov 25 16:13:37 crc kubenswrapper[4800]: I1125 16:13:37.162020 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3dd43996-4598-45b4-814c-2a64576c4a8b","Type":"ContainerStarted","Data":"5d396207204f60bfbd36ad94088b57851e642036a27695a7f8f0ff248c88857f"}
Nov 25 16:13:38 crc kubenswrapper[4800]: I1125 16:13:38.193020 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"329cfaaf-7a4a-428f-baae-618b0e388b00","Type":"ContainerStarted","Data":"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d"} Nov 25 16:13:38 crc kubenswrapper[4800]: I1125 16:13:38.196707 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2" event={"ID":"9f91611c-ae1b-460f-a9f2-a44e2cae6143","Type":"ContainerStarted","Data":"6cb35357a6e82d06bcc84a4a0f99559e030e54372c9e4929f8d4e90ccd5eb20a"} Nov 25 16:13:38 crc kubenswrapper[4800]: I1125 16:13:38.196879 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2" Nov 25 16:13:38 crc kubenswrapper[4800]: I1125 16:13:38.225351 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2" podStartSLOduration=3.225329682 podStartE2EDuration="3.225329682s" podCreationTimestamp="2025-11-25 16:13:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:13:38.216281516 +0000 UTC m=+3379.270690008" watchObservedRunningTime="2025-11-25 16:13:38.225329682 +0000 UTC m=+3379.279738154" Nov 25 16:13:38 crc kubenswrapper[4800]: I1125 16:13:38.845732 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 25 16:13:39 crc kubenswrapper[4800]: I1125 16:13:39.215747 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"329cfaaf-7a4a-428f-baae-618b0e388b00","Type":"ContainerStarted","Data":"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc"} Nov 25 16:13:39 crc kubenswrapper[4800]: I1125 16:13:39.216437 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 16:13:39 crc kubenswrapper[4800]: I1125 16:13:39.219316 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3dd43996-4598-45b4-814c-2a64576c4a8b","Type":"ContainerStarted","Data":"25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72"} Nov 25 16:13:39 crc kubenswrapper[4800]: I1125 16:13:39.219384 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3dd43996-4598-45b4-814c-2a64576c4a8b","Type":"ContainerStarted","Data":"3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe"} Nov 25 16:13:39 crc kubenswrapper[4800]: I1125 16:13:39.276209 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.276181601 podStartE2EDuration="4.276181601s" podCreationTimestamp="2025-11-25 16:13:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:13:39.239178456 +0000 UTC m=+3380.293586948" watchObservedRunningTime="2025-11-25 16:13:39.276181601 +0000 UTC m=+3380.330590083" Nov 25 16:13:39 crc kubenswrapper[4800]: I1125 16:13:39.283821 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.421055477 podStartE2EDuration="4.283799268s" podCreationTimestamp="2025-11-25 16:13:35 +0000 UTC" firstStartedPulling="2025-11-25 16:13:36.457327536 +0000 UTC m=+3377.511736018" lastFinishedPulling="2025-11-25 16:13:37.320071327 
+0000 UTC m=+3378.374479809" observedRunningTime="2025-11-25 16:13:39.270407744 +0000 UTC m=+3380.324816226" watchObservedRunningTime="2025-11-25 16:13:39.283799268 +0000 UTC m=+3380.338207750" Nov 25 16:13:40 crc kubenswrapper[4800]: I1125 16:13:40.234904 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api-log" containerID="cri-o://90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d" gracePeriod=30 Nov 25 16:13:40 crc kubenswrapper[4800]: I1125 16:13:40.235110 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api" containerID="cri-o://af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc" gracePeriod=30 Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.010082 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.167669 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data-custom\") pod \"329cfaaf-7a4a-428f-baae-618b0e388b00\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.168427 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/329cfaaf-7a4a-428f-baae-618b0e388b00-logs\") pod \"329cfaaf-7a4a-428f-baae-618b0e388b00\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.168708 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-combined-ca-bundle\") pod \"329cfaaf-7a4a-428f-baae-618b0e388b00\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.168934 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-scripts\") pod \"329cfaaf-7a4a-428f-baae-618b0e388b00\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.169010 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/329cfaaf-7a4a-428f-baae-618b0e388b00-logs" (OuterVolumeSpecName: "logs") pod "329cfaaf-7a4a-428f-baae-618b0e388b00" (UID: "329cfaaf-7a4a-428f-baae-618b0e388b00"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.169032 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/329cfaaf-7a4a-428f-baae-618b0e388b00-etc-machine-id\") pod \"329cfaaf-7a4a-428f-baae-618b0e388b00\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.169254 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/329cfaaf-7a4a-428f-baae-618b0e388b00-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "329cfaaf-7a4a-428f-baae-618b0e388b00" (UID: "329cfaaf-7a4a-428f-baae-618b0e388b00"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.169294 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data\") pod \"329cfaaf-7a4a-428f-baae-618b0e388b00\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.169501 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8fhc\" (UniqueName: \"kubernetes.io/projected/329cfaaf-7a4a-428f-baae-618b0e388b00-kube-api-access-m8fhc\") pod \"329cfaaf-7a4a-428f-baae-618b0e388b00\" (UID: \"329cfaaf-7a4a-428f-baae-618b0e388b00\") " Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.170612 4800 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/329cfaaf-7a4a-428f-baae-618b0e388b00-logs\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.170825 4800 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/329cfaaf-7a4a-428f-baae-618b0e388b00-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.178204 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-scripts" (OuterVolumeSpecName: "scripts") pod "329cfaaf-7a4a-428f-baae-618b0e388b00" (UID: "329cfaaf-7a4a-428f-baae-618b0e388b00"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.178387 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "329cfaaf-7a4a-428f-baae-618b0e388b00" (UID: "329cfaaf-7a4a-428f-baae-618b0e388b00"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.179425 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/329cfaaf-7a4a-428f-baae-618b0e388b00-kube-api-access-m8fhc" (OuterVolumeSpecName: "kube-api-access-m8fhc") pod "329cfaaf-7a4a-428f-baae-618b0e388b00" (UID: "329cfaaf-7a4a-428f-baae-618b0e388b00"). InnerVolumeSpecName "kube-api-access-m8fhc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.222302 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "329cfaaf-7a4a-428f-baae-618b0e388b00" (UID: "329cfaaf-7a4a-428f-baae-618b0e388b00"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.236985 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data" (OuterVolumeSpecName: "config-data") pod "329cfaaf-7a4a-428f-baae-618b0e388b00" (UID: "329cfaaf-7a4a-428f-baae-618b0e388b00"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.274002 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.274463 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.274780 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.274900 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8fhc\" (UniqueName: \"kubernetes.io/projected/329cfaaf-7a4a-428f-baae-618b0e388b00-kube-api-access-m8fhc\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.274966 4800 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/329cfaaf-7a4a-428f-baae-618b0e388b00-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.275588 4800 generic.go:334] "Generic (PLEG): container finished" podID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerID="af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc" exitCode=0 Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.275656 4800 generic.go:334] "Generic (PLEG): container finished" podID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerID="90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d" exitCode=143 Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.275723 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"329cfaaf-7a4a-428f-baae-618b0e388b00","Type":"ContainerDied","Data":"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc"} Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.275863 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"329cfaaf-7a4a-428f-baae-618b0e388b00","Type":"ContainerDied","Data":"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d"} Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.275889 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" 
event={"ID":"329cfaaf-7a4a-428f-baae-618b0e388b00","Type":"ContainerDied","Data":"fa5fe77f953ae541d136cadb0fd25348df858fea5a649ef73a880b3076789ac1"} Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.275918 4800 scope.go:117] "RemoveContainer" containerID="af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.276182 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.335446 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.356941 4800 scope.go:117] "RemoveContainer" containerID="90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.361465 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"] Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.382869 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 25 16:13:41 crc kubenswrapper[4800]: E1125 16:13:41.383733 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.383753 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api" Nov 25 16:13:41 crc kubenswrapper[4800]: E1125 16:13:41.383791 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api-log" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.383798 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api-log" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.384039 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api-log" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.384053 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" containerName="manila-api" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.385355 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.391610 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.391928 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.392077 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.406880 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.448307 4800 scope.go:117] "RemoveContainer" containerID="af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc" Nov 25 16:13:41 crc kubenswrapper[4800]: E1125 16:13:41.448875 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc\": container with ID starting with af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc not found: ID does not exist" containerID="af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.448955 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc"} err="failed to get container status \"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc\": rpc error: code = NotFound desc = could not find container \"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc\": container with ID starting with af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc not found: ID does not exist" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.449001 4800 scope.go:117] "RemoveContainer" containerID="90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d" Nov 25 16:13:41 crc kubenswrapper[4800]: E1125 16:13:41.449394 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d\": container with ID starting with 90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d not found: ID does not exist" containerID="90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.449439 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d"} err="failed to get container status \"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d\": rpc error: code = NotFound desc = could not find container \"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d\": container with ID starting with 90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d not found: ID does not exist" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.449471 4800 scope.go:117] "RemoveContainer" containerID="af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.450174 4800 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc"} err="failed to get container status \"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc\": rpc error: code = NotFound desc = could not find container \"af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc\": container with ID starting with af199809ad68ea43d93313e55f7a26f7d48ba409f6a104b974683ce326791abc not found: ID does not exist" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.450239 4800 scope.go:117] "RemoveContainer" containerID="90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.451795 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d"} err="failed to get container status \"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d\": rpc error: code = NotFound desc = could not find container \"90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d\": container with ID starting with 90216d90db259e4ecccb8a037b47e57680407e9e132659238932581c928d596d not found: ID does not exist" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.489391 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-public-tls-certs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.489675 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-config-data\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.489881 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2533dec9-48e6-4f7e-8d9e-d90e5db00418-etc-machine-id\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.489962 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2533dec9-48e6-4f7e-8d9e-d90e5db00418-logs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.490008 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnjqq\" (UniqueName: \"kubernetes.io/projected/2533dec9-48e6-4f7e-8d9e-d90e5db00418-kube-api-access-lnjqq\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.490299 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-scripts\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.490459 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-internal-tls-certs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.490709 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.490830 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-config-data-custom\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.593676 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-scripts\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.594236 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-internal-tls-certs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.594661 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.594862 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-config-data-custom\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.595062 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-public-tls-certs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.595312 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-config-data\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.595559 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2533dec9-48e6-4f7e-8d9e-d90e5db00418-etc-machine-id\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 
crc kubenswrapper[4800]: I1125 16:13:41.595703 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2533dec9-48e6-4f7e-8d9e-d90e5db00418-logs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.595817 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnjqq\" (UniqueName: \"kubernetes.io/projected/2533dec9-48e6-4f7e-8d9e-d90e5db00418-kube-api-access-lnjqq\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.596872 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2533dec9-48e6-4f7e-8d9e-d90e5db00418-etc-machine-id\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.598029 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2533dec9-48e6-4f7e-8d9e-d90e5db00418-logs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.602521 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.602891 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-internal-tls-certs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.602899 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-config-data-custom\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.603037 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-scripts\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.608233 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-public-tls-certs\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.610372 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2533dec9-48e6-4f7e-8d9e-d90e5db00418-config-data\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.631564 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lnjqq\" (UniqueName: \"kubernetes.io/projected/2533dec9-48e6-4f7e-8d9e-d90e5db00418-kube-api-access-lnjqq\") pod \"manila-api-0\" (UID: \"2533dec9-48e6-4f7e-8d9e-d90e5db00418\") " pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.745537 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 16:13:41 crc kubenswrapper[4800]: I1125 16:13:41.807815 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="329cfaaf-7a4a-428f-baae-618b0e388b00" path="/var/lib/kubelet/pods/329cfaaf-7a4a-428f-baae-618b0e388b00/volumes" Nov 25 16:13:42 crc kubenswrapper[4800]: I1125 16:13:42.444098 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.051656 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.052425 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-central-agent" containerID="cri-o://95a43851290ab3ca21c711171aa3fb4632ba69eebc8052a48e6062109743050d" gracePeriod=30 Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.052516 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-notification-agent" containerID="cri-o://3448eb5f60230f656f77ca6b19dc952b1bdf03162467550c1a85c9a5f14af140" gracePeriod=30 Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.052516 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="proxy-httpd" containerID="cri-o://eee29a325706c6dbd9944099e81e5dcbefbcf4d85e88f1061c3e2c7708f09d8b" gracePeriod=30 Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.052516 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="sg-core" containerID="cri-o://4b3d9c7c7d5f4095b55b7aa4ee435bb28afe8abd069a1d5124945ce34593e04d" gracePeriod=30 Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.314550 4800 generic.go:334] "Generic (PLEG): container finished" podID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerID="eee29a325706c6dbd9944099e81e5dcbefbcf4d85e88f1061c3e2c7708f09d8b" exitCode=0 Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.314613 4800 generic.go:334] "Generic (PLEG): container finished" podID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerID="4b3d9c7c7d5f4095b55b7aa4ee435bb28afe8abd069a1d5124945ce34593e04d" exitCode=2 Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.314712 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerDied","Data":"eee29a325706c6dbd9944099e81e5dcbefbcf4d85e88f1061c3e2c7708f09d8b"} Nov 25 16:13:43 crc kubenswrapper[4800]: I1125 16:13:43.314810 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerDied","Data":"4b3d9c7c7d5f4095b55b7aa4ee435bb28afe8abd069a1d5124945ce34593e04d"} Nov 25 16:13:44 crc kubenswrapper[4800]: I1125 16:13:44.340022 4800 
generic.go:334] "Generic (PLEG): container finished" podID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerID="95a43851290ab3ca21c711171aa3fb4632ba69eebc8052a48e6062109743050d" exitCode=0 Nov 25 16:13:44 crc kubenswrapper[4800]: I1125 16:13:44.340251 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerDied","Data":"95a43851290ab3ca21c711171aa3fb4632ba69eebc8052a48e6062109743050d"} Nov 25 16:13:45 crc kubenswrapper[4800]: I1125 16:13:45.797864 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.188261 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78f48d6b7c-6g2g2" Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.278572 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c58867b6c-fng46"] Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.278972 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-c58867b6c-fng46" podUID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerName="dnsmasq-dns" containerID="cri-o://e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e" gracePeriod=10 Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.449006 4800 generic.go:334] "Generic (PLEG): container finished" podID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerID="3448eb5f60230f656f77ca6b19dc952b1bdf03162467550c1a85c9a5f14af140" exitCode=0 Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.449543 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerDied","Data":"3448eb5f60230f656f77ca6b19dc952b1bdf03162467550c1a85c9a5f14af140"} Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.511480 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"2533dec9-48e6-4f7e-8d9e-d90e5db00418","Type":"ContainerStarted","Data":"417d0ed11a775ac977af393d70863f3f722f0785b5629975350e445b588098a5"} Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.511534 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"2533dec9-48e6-4f7e-8d9e-d90e5db00418","Type":"ContainerStarted","Data":"c2e8dea19066e1665ab88b6a23a8912d355ddf258b3450ac5aec1e4c18458d5d"} Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.524830 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"988ef021-f985-428e-a4e2-1ef4cbeb4438","Type":"ContainerStarted","Data":"475a6c78dddffc4cbad18c8ea408a71f331d7782bdfe25bda95a16e9e1a9b311"} Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.786978 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.857225 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.950931 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mqwr\" (UniqueName: \"kubernetes.io/projected/83921bea-cb41-4b95-8e56-4f49cae7cba3-kube-api-access-6mqwr\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.951027 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-sg-core-conf-yaml\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.951151 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-combined-ca-bundle\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.951197 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-log-httpd\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.951233 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-config-data\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.951278 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-run-httpd\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.951397 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-ceilometer-tls-certs\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.951531 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-scripts\") pod \"83921bea-cb41-4b95-8e56-4f49cae7cba3\" (UID: \"83921bea-cb41-4b95-8e56-4f49cae7cba3\") " Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.953171 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.953710 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.983084 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-scripts" (OuterVolumeSpecName: "scripts") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:46 crc kubenswrapper[4800]: I1125 16:13:46.983136 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83921bea-cb41-4b95-8e56-4f49cae7cba3-kube-api-access-6mqwr" (OuterVolumeSpecName: "kube-api-access-6mqwr") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "kube-api-access-6mqwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.054881 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mqwr\" (UniqueName: \"kubernetes.io/projected/83921bea-cb41-4b95-8e56-4f49cae7cba3-kube-api-access-6mqwr\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.054951 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.054969 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83921bea-cb41-4b95-8e56-4f49cae7cba3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.054982 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.069168 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.107735 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.136931 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.147237 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.157536 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.157790 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.157917 4800 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.259049 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-nb\") pod \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.259686 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-sb\") pod \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.259860 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-dns-svc\") pod \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.260065 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-config\") pod \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.260141 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-openstack-edpm-ipam\") pod \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.260402 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2k8c\" (UniqueName: \"kubernetes.io/projected/c9fb8541-9c86-4587-a27d-01ebf680fcc1-kube-api-access-j2k8c\") pod 
\"c9fb8541-9c86-4587-a27d-01ebf680fcc1\" (UID: \"c9fb8541-9c86-4587-a27d-01ebf680fcc1\") " Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.263149 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-config-data" (OuterVolumeSpecName: "config-data") pod "83921bea-cb41-4b95-8e56-4f49cae7cba3" (UID: "83921bea-cb41-4b95-8e56-4f49cae7cba3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.267487 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9fb8541-9c86-4587-a27d-01ebf680fcc1-kube-api-access-j2k8c" (OuterVolumeSpecName: "kube-api-access-j2k8c") pod "c9fb8541-9c86-4587-a27d-01ebf680fcc1" (UID: "c9fb8541-9c86-4587-a27d-01ebf680fcc1"). InnerVolumeSpecName "kube-api-access-j2k8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.322759 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c9fb8541-9c86-4587-a27d-01ebf680fcc1" (UID: "c9fb8541-9c86-4587-a27d-01ebf680fcc1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.345909 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c9fb8541-9c86-4587-a27d-01ebf680fcc1" (UID: "c9fb8541-9c86-4587-a27d-01ebf680fcc1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.357368 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-config" (OuterVolumeSpecName: "config") pod "c9fb8541-9c86-4587-a27d-01ebf680fcc1" (UID: "c9fb8541-9c86-4587-a27d-01ebf680fcc1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.360407 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c9fb8541-9c86-4587-a27d-01ebf680fcc1" (UID: "c9fb8541-9c86-4587-a27d-01ebf680fcc1"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.363379 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2k8c\" (UniqueName: \"kubernetes.io/projected/c9fb8541-9c86-4587-a27d-01ebf680fcc1-kube-api-access-j2k8c\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.363606 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.363685 4800 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.363795 4800 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.363884 4800 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.364021 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83921bea-cb41-4b95-8e56-4f49cae7cba3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.382629 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "c9fb8541-9c86-4587-a27d-01ebf680fcc1" (UID: "c9fb8541-9c86-4587-a27d-01ebf680fcc1"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.466158 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c9fb8541-9c86-4587-a27d-01ebf680fcc1-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.540620 4800 generic.go:334] "Generic (PLEG): container finished" podID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerID="e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e" exitCode=0 Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.540807 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c58867b6c-fng46" event={"ID":"c9fb8541-9c86-4587-a27d-01ebf680fcc1","Type":"ContainerDied","Data":"e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e"} Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.541173 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c58867b6c-fng46" event={"ID":"c9fb8541-9c86-4587-a27d-01ebf680fcc1","Type":"ContainerDied","Data":"b423841e63f0ee99f40dee2260eee3396fb0da41141aeb935e0d39b42721bbc8"} Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.541208 4800 scope.go:117] "RemoveContainer" containerID="e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.540940 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c58867b6c-fng46" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.549570 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.550147 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83921bea-cb41-4b95-8e56-4f49cae7cba3","Type":"ContainerDied","Data":"aac89edd3df31dcba710a55d212026560abceaf25321cf38167c0747a340138f"} Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.556955 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"1fbc3a3e2d5adfe5cf06ea9c7ae6a3f2cf274b421d1863625e183da94fc1b793"} Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.566000 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"2533dec9-48e6-4f7e-8d9e-d90e5db00418","Type":"ContainerStarted","Data":"33233a6e599c10b893a144b8f54d7069b457e12a912a188120182d983034e260"} Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.566198 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.578659 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"988ef021-f985-428e-a4e2-1ef4cbeb4438","Type":"ContainerStarted","Data":"5cc41b8662120e04b08d74a47070aa117d4aca3d09300fe9e4ca639e19d5c785"} Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.666117 4800 scope.go:117] "RemoveContainer" containerID="85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.666433 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=6.666396196 podStartE2EDuration="6.666396196s" podCreationTimestamp="2025-11-25 16:13:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:13:47.618135065 +0000 UTC m=+3388.672543547" watchObservedRunningTime="2025-11-25 16:13:47.666396196 +0000 UTC m=+3388.720804678" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.681399 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.794207303 podStartE2EDuration="12.681372383s" podCreationTimestamp="2025-11-25 16:13:35 +0000 UTC" firstStartedPulling="2025-11-25 16:13:36.714594313 +0000 UTC m=+3377.769002795" lastFinishedPulling="2025-11-25 16:13:45.601759393 +0000 UTC m=+3386.656167875" observedRunningTime="2025-11-25 16:13:47.65069056 +0000 UTC m=+3388.705099062" watchObservedRunningTime="2025-11-25 16:13:47.681372383 +0000 UTC m=+3388.735780865" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.740407 4800 scope.go:117] "RemoveContainer" containerID="e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e" Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.744975 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e\": container with ID starting with e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e not found: ID does not 
exist" containerID="e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.745060 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e"} err="failed to get container status \"e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e\": rpc error: code = NotFound desc = could not find container \"e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e\": container with ID starting with e309a7b75a3a0cfce85f23c72ce70d3c9f2ec41c785f227c0b4fbccf8aec287e not found: ID does not exist" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.745086 4800 scope.go:117] "RemoveContainer" containerID="85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da" Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.746680 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da\": container with ID starting with 85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da not found: ID does not exist" containerID="85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.746735 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da"} err="failed to get container status \"85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da\": rpc error: code = NotFound desc = could not find container \"85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da\": container with ID starting with 85ae38eba70d0e7b7e670c5e6b9f8bd09439ec586c81738fe4443ff0626d48da not found: ID does not exist" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.746770 4800 scope.go:117] "RemoveContainer" containerID="eee29a325706c6dbd9944099e81e5dcbefbcf4d85e88f1061c3e2c7708f09d8b" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.755905 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.783371 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.808275 4800 scope.go:117] "RemoveContainer" containerID="4b3d9c7c7d5f4095b55b7aa4ee435bb28afe8abd069a1d5124945ce34593e04d" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.813348 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" path="/var/lib/kubelet/pods/83921bea-cb41-4b95-8e56-4f49cae7cba3/volumes" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.814554 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c58867b6c-fng46"] Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.822730 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c58867b6c-fng46"] Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.830938 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.831605 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="proxy-httpd" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.831629 
4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="proxy-httpd" Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.831662 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="sg-core" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.831673 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="sg-core" Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.831687 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerName="init" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.831695 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerName="init" Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.831712 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-notification-agent" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.831724 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-notification-agent" Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.831751 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-central-agent" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.831762 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-central-agent" Nov 25 16:13:47 crc kubenswrapper[4800]: E1125 16:13:47.831783 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerName="dnsmasq-dns" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.831792 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerName="dnsmasq-dns" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.832148 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="sg-core" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.832166 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-notification-agent" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.832182 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="ceilometer-central-agent" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.832220 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" containerName="dnsmasq-dns" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.832237 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="83921bea-cb41-4b95-8e56-4f49cae7cba3" containerName="proxy-httpd" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.837784 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.837882 4800 scope.go:117] "RemoveContainer" containerID="3448eb5f60230f656f77ca6b19dc952b1bdf03162467550c1a85c9a5f14af140" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.842612 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.854917 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.869829 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.893660 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.921995 4800 scope.go:117] "RemoveContainer" containerID="95a43851290ab3ca21c711171aa3fb4632ba69eebc8052a48e6062109743050d" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.985654 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-run-httpd\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.986051 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.986376 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.986472 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-config-data\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.986511 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-log-httpd\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.986621 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw7lq\" (UniqueName: \"kubernetes.io/projected/427f6aff-5b6f-419a-a730-e311d677c4a2-kube-api-access-xw7lq\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.986671 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:47 crc kubenswrapper[4800]: I1125 16:13:47.995614 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-scripts\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099351 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-scripts\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099456 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-run-httpd\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099542 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099621 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099863 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-config-data\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099891 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-log-httpd\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099942 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw7lq\" (UniqueName: \"kubernetes.io/projected/427f6aff-5b6f-419a-a730-e311d677c4a2-kube-api-access-xw7lq\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.099978 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.106515 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-log-httpd\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.109671 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-run-httpd\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.110536 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.110784 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.122924 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-scripts\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.129466 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw7lq\" (UniqueName: \"kubernetes.io/projected/427f6aff-5b6f-419a-a730-e311d677c4a2-kube-api-access-xw7lq\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.130401 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-config-data\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.135026 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.197548 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 16:13:48 crc kubenswrapper[4800]: I1125 16:13:48.735580 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:49 crc kubenswrapper[4800]: I1125 16:13:49.605512 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerStarted","Data":"e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae"} Nov 25 16:13:49 crc kubenswrapper[4800]: I1125 16:13:49.606386 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerStarted","Data":"0935951611ba875d56feaf4627b6abbe1b102367a40d01fd36249a873b52319f"} Nov 25 16:13:49 crc kubenswrapper[4800]: I1125 16:13:49.763653 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:49 crc kubenswrapper[4800]: I1125 16:13:49.798548 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9fb8541-9c86-4587-a27d-01ebf680fcc1" path="/var/lib/kubelet/pods/c9fb8541-9c86-4587-a27d-01ebf680fcc1/volumes" Nov 25 16:13:50 crc kubenswrapper[4800]: I1125 16:13:50.631507 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerStarted","Data":"7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798"} Nov 25 16:13:51 crc kubenswrapper[4800]: I1125 16:13:51.647419 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerStarted","Data":"5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c"} Nov 25 16:13:53 crc kubenswrapper[4800]: I1125 16:13:53.671251 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerStarted","Data":"70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f"} Nov 25 16:13:53 crc kubenswrapper[4800]: I1125 16:13:53.672371 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-central-agent" containerID="cri-o://e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae" gracePeriod=30 Nov 25 16:13:53 crc kubenswrapper[4800]: I1125 16:13:53.672729 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 16:13:53 crc kubenswrapper[4800]: I1125 16:13:53.674759 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="proxy-httpd" containerID="cri-o://70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f" gracePeriod=30 Nov 25 16:13:53 crc kubenswrapper[4800]: I1125 16:13:53.674946 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="sg-core" containerID="cri-o://5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c" gracePeriod=30 Nov 25 16:13:53 crc kubenswrapper[4800]: I1125 16:13:53.675007 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-notification-agent" 
containerID="cri-o://7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798" gracePeriod=30 Nov 25 16:13:53 crc kubenswrapper[4800]: I1125 16:13:53.720054 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.538061666 podStartE2EDuration="6.720023152s" podCreationTimestamp="2025-11-25 16:13:47 +0000 UTC" firstStartedPulling="2025-11-25 16:13:48.731108061 +0000 UTC m=+3389.785516543" lastFinishedPulling="2025-11-25 16:13:52.913069547 +0000 UTC m=+3393.967478029" observedRunningTime="2025-11-25 16:13:53.706457194 +0000 UTC m=+3394.760865676" watchObservedRunningTime="2025-11-25 16:13:53.720023152 +0000 UTC m=+3394.774431644" Nov 25 16:13:54 crc kubenswrapper[4800]: I1125 16:13:54.685155 4800 generic.go:334] "Generic (PLEG): container finished" podID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerID="70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f" exitCode=0 Nov 25 16:13:54 crc kubenswrapper[4800]: I1125 16:13:54.686748 4800 generic.go:334] "Generic (PLEG): container finished" podID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerID="5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c" exitCode=2 Nov 25 16:13:54 crc kubenswrapper[4800]: I1125 16:13:54.686977 4800 generic.go:334] "Generic (PLEG): container finished" podID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerID="7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798" exitCode=0 Nov 25 16:13:54 crc kubenswrapper[4800]: I1125 16:13:54.685247 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerDied","Data":"70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f"} Nov 25 16:13:54 crc kubenswrapper[4800]: I1125 16:13:54.687670 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerDied","Data":"5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c"} Nov 25 16:13:54 crc kubenswrapper[4800]: I1125 16:13:54.687751 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerDied","Data":"7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798"} Nov 25 16:13:55 crc kubenswrapper[4800]: I1125 16:13:55.858636 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.408516 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.508680 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-log-httpd\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.508761 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-ceilometer-tls-certs\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.508812 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw7lq\" (UniqueName: \"kubernetes.io/projected/427f6aff-5b6f-419a-a730-e311d677c4a2-kube-api-access-xw7lq\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.508955 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-scripts\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.509184 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-run-httpd\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.509247 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-combined-ca-bundle\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.509292 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-sg-core-conf-yaml\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.509358 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-config-data\") pod \"427f6aff-5b6f-419a-a730-e311d677c4a2\" (UID: \"427f6aff-5b6f-419a-a730-e311d677c4a2\") " Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.509446 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.509929 4800 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.510798 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.516544 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-scripts" (OuterVolumeSpecName: "scripts") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.526432 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/427f6aff-5b6f-419a-a730-e311d677c4a2-kube-api-access-xw7lq" (OuterVolumeSpecName: "kube-api-access-xw7lq") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "kube-api-access-xw7lq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.557022 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.586246 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.613290 4800 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.613335 4800 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.613352 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw7lq\" (UniqueName: \"kubernetes.io/projected/427f6aff-5b6f-419a-a730-e311d677c4a2-kube-api-access-xw7lq\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.613366 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.613378 4800 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/427f6aff-5b6f-419a-a730-e311d677c4a2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.618294 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.629733 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-config-data" (OuterVolumeSpecName: "config-data") pod "427f6aff-5b6f-419a-a730-e311d677c4a2" (UID: "427f6aff-5b6f-419a-a730-e311d677c4a2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.716044 4800 generic.go:334] "Generic (PLEG): container finished" podID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerID="e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae" exitCode=0 Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.716112 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerDied","Data":"e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae"} Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.716161 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"427f6aff-5b6f-419a-a730-e311d677c4a2","Type":"ContainerDied","Data":"0935951611ba875d56feaf4627b6abbe1b102367a40d01fd36249a873b52319f"} Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.716187 4800 scope.go:117] "RemoveContainer" containerID="70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.716418 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.716534 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.716568 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427f6aff-5b6f-419a-a730-e311d677c4a2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.745467 4800 scope.go:117] "RemoveContainer" containerID="5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.774567 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.795131 4800 scope.go:117] "RemoveContainer" containerID="7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.805120 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.835935 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.836615 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="sg-core" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.836640 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="sg-core" Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.836677 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-central-agent" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.836688 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-central-agent" Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.836728 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-notification-agent" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.836735 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-notification-agent" Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.836755 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="proxy-httpd" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.836762 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="proxy-httpd" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.836983 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="sg-core" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.836997 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-central-agent" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.837015 4800 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="ceilometer-notification-agent" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.837025 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" containerName="proxy-httpd" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.841526 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.844642 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.844995 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.846202 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.848064 4800 scope.go:117] "RemoveContainer" containerID="e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.851753 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.874490 4800 scope.go:117] "RemoveContainer" containerID="70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f" Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.876900 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f\": container with ID starting with 70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f not found: ID does not exist" containerID="70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.876940 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f"} err="failed to get container status \"70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f\": rpc error: code = NotFound desc = could not find container \"70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f\": container with ID starting with 70e5d6084ff66e7599ebd24d0028954b9c84d3d138d8fd3ea2fad8fc1e68f65f not found: ID does not exist" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.876968 4800 scope.go:117] "RemoveContainer" containerID="5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c" Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.877645 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c\": container with ID starting with 5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c not found: ID does not exist" containerID="5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.877720 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c"} err="failed to get container status \"5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c\": rpc error: code = NotFound desc = could not 
find container \"5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c\": container with ID starting with 5c32e7181c62c7fd1a3951f6a6b4eca8a6d797dd639c6def1cd8626e6325de7c not found: ID does not exist" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.877766 4800 scope.go:117] "RemoveContainer" containerID="7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798" Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.878264 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798\": container with ID starting with 7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798 not found: ID does not exist" containerID="7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.878321 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798"} err="failed to get container status \"7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798\": rpc error: code = NotFound desc = could not find container \"7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798\": container with ID starting with 7ef911113a9dc8ad6c5da52ee08041a73de07c5ba8ca83c1b515dd7ce9bc3798 not found: ID does not exist" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.878347 4800 scope.go:117] "RemoveContainer" containerID="e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae" Nov 25 16:13:56 crc kubenswrapper[4800]: E1125 16:13:56.878739 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae\": container with ID starting with e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae not found: ID does not exist" containerID="e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.878786 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae"} err="failed to get container status \"e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae\": rpc error: code = NotFound desc = could not find container \"e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae\": container with ID starting with e2971094d921644565fef4856e9e392ace3f40b34d990a3f8fa6993ccc75f2ae not found: ID does not exist" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927031 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2bcf406e-1184-44de-a565-974dd28d1256-log-httpd\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927123 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927228 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw5dw\" (UniqueName: \"kubernetes.io/projected/2bcf406e-1184-44de-a565-974dd28d1256-kube-api-access-jw5dw\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927272 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927291 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-config-data\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927401 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2bcf406e-1184-44de-a565-974dd28d1256-run-httpd\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927470 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:56 crc kubenswrapper[4800]: I1125 16:13:56.927578 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-scripts\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030281 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030463 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw5dw\" (UniqueName: \"kubernetes.io/projected/2bcf406e-1184-44de-a565-974dd28d1256-kube-api-access-jw5dw\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030537 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030576 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-config-data\") pod \"ceilometer-0\" (UID: 
\"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030664 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2bcf406e-1184-44de-a565-974dd28d1256-run-httpd\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030741 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030835 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-scripts\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.030904 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2bcf406e-1184-44de-a565-974dd28d1256-log-httpd\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.031342 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2bcf406e-1184-44de-a565-974dd28d1256-run-httpd\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.031495 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2bcf406e-1184-44de-a565-974dd28d1256-log-httpd\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.035453 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.036214 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-config-data\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.036607 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.039093 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0" 
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.040066 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bcf406e-1184-44de-a565-974dd28d1256-scripts\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0"
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.059733 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw5dw\" (UniqueName: \"kubernetes.io/projected/2bcf406e-1184-44de-a565-974dd28d1256-kube-api-access-jw5dw\") pod \"ceilometer-0\" (UID: \"2bcf406e-1184-44de-a565-974dd28d1256\") " pod="openstack/ceilometer-0"
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.167378 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.656532 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0"
Nov 25 16:13:57 crc kubenswrapper[4800]: W1125 16:13:57.726245 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bcf406e_1184_44de_a565_974dd28d1256.slice/crio-fe8e0895cefc4d80e6ada4637259bc639ad19419d91462e0ad5483ed085d9aa7 WatchSource:0}: Error finding container fe8e0895cefc4d80e6ada4637259bc639ad19419d91462e0ad5483ed085d9aa7: Status 404 returned error can't find the container with id fe8e0895cefc4d80e6ada4637259bc639ad19419d91462e0ad5483ed085d9aa7
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.742083 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.751089 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.751948 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="manila-scheduler" containerID="cri-o://3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe" gracePeriod=30
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.752213 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="probe" containerID="cri-o://25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72" gracePeriod=30
Nov 25 16:13:57 crc kubenswrapper[4800]: I1125 16:13:57.798418 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="427f6aff-5b6f-419a-a730-e311d677c4a2" path="/var/lib/kubelet/pods/427f6aff-5b6f-419a-a730-e311d677c4a2/volumes"
Nov 25 16:13:58 crc kubenswrapper[4800]: I1125 16:13:58.763502 4800 generic.go:334] "Generic (PLEG): container finished" podID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerID="25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72" exitCode=0
Nov 25 16:13:58 crc kubenswrapper[4800]: I1125 16:13:58.763594 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3dd43996-4598-45b4-814c-2a64576c4a8b","Type":"ContainerDied","Data":"25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72"}
Nov 25 16:13:58 crc kubenswrapper[4800]: I1125 16:13:58.766951 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2bcf406e-1184-44de-a565-974dd28d1256","Type":"ContainerStarted","Data":"24edbaaa85f50facb17c2190c195be147f0cb046644e7060e9bf56d55c06e7cc"}
Nov 25 16:13:58 crc kubenswrapper[4800]: I1125 16:13:58.766996 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2bcf406e-1184-44de-a565-974dd28d1256","Type":"ContainerStarted","Data":"fe8e0895cefc4d80e6ada4637259bc639ad19419d91462e0ad5483ed085d9aa7"}
Nov 25 16:13:59 crc kubenswrapper[4800]: I1125 16:13:59.778989 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2bcf406e-1184-44de-a565-974dd28d1256","Type":"ContainerStarted","Data":"d43cdc6d37b25ed5bb6b357f7ae4319415dcce384bdbe008038b9373a0758c19"}
Nov 25 16:14:00 crc kubenswrapper[4800]: I1125 16:14:00.799275 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2bcf406e-1184-44de-a565-974dd28d1256","Type":"ContainerStarted","Data":"eb9ec21f5f2e8b8d9c6ee929bd4858ecd2cbc496a6ec53482b468c99a1f4f32c"}
Nov 25 16:14:01 crc kubenswrapper[4800]: I1125 16:14:01.819434 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2bcf406e-1184-44de-a565-974dd28d1256","Type":"ContainerStarted","Data":"6e495279a9ba1ff940c9e7977e3655226858a1c4967f0195f42f168eea09c42b"}
Nov 25 16:14:01 crc kubenswrapper[4800]: I1125 16:14:01.819932 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 16:14:01 crc kubenswrapper[4800]: I1125 16:14:01.859100 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.070384402 podStartE2EDuration="5.859072866s" podCreationTimestamp="2025-11-25 16:13:56 +0000 UTC" firstStartedPulling="2025-11-25 16:13:57.729748621 +0000 UTC m=+3398.784157103" lastFinishedPulling="2025-11-25 16:14:01.518437095 +0000 UTC m=+3402.572845567" observedRunningTime="2025-11-25 16:14:01.844761688 +0000 UTC m=+3402.899170170" watchObservedRunningTime="2025-11-25 16:14:01.859072866 +0000 UTC m=+3402.913481348"
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.284926 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0"
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.451241 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.513384 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data\") pod \"3dd43996-4598-45b4-814c-2a64576c4a8b\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") "
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.513528 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data-custom\") pod \"3dd43996-4598-45b4-814c-2a64576c4a8b\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") "
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.513653 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-scripts\") pod \"3dd43996-4598-45b4-814c-2a64576c4a8b\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") "
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.513691 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dd43996-4598-45b4-814c-2a64576c4a8b-etc-machine-id\") pod \"3dd43996-4598-45b4-814c-2a64576c4a8b\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") "
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.513748 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-combined-ca-bundle\") pod \"3dd43996-4598-45b4-814c-2a64576c4a8b\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") "
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.513800 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bdkq\" (UniqueName: \"kubernetes.io/projected/3dd43996-4598-45b4-814c-2a64576c4a8b-kube-api-access-6bdkq\") pod \"3dd43996-4598-45b4-814c-2a64576c4a8b\" (UID: \"3dd43996-4598-45b4-814c-2a64576c4a8b\") "
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.515771 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3dd43996-4598-45b4-814c-2a64576c4a8b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "3dd43996-4598-45b4-814c-2a64576c4a8b" (UID: "3dd43996-4598-45b4-814c-2a64576c4a8b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.533114 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-scripts" (OuterVolumeSpecName: "scripts") pod "3dd43996-4598-45b4-814c-2a64576c4a8b" (UID: "3dd43996-4598-45b4-814c-2a64576c4a8b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.551121 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dd43996-4598-45b4-814c-2a64576c4a8b-kube-api-access-6bdkq" (OuterVolumeSpecName: "kube-api-access-6bdkq") pod "3dd43996-4598-45b4-814c-2a64576c4a8b" (UID: "3dd43996-4598-45b4-814c-2a64576c4a8b"). InnerVolumeSpecName "kube-api-access-6bdkq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.558091 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3dd43996-4598-45b4-814c-2a64576c4a8b" (UID: "3dd43996-4598-45b4-814c-2a64576c4a8b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.623523 4800 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.623564 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.623573 4800 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3dd43996-4598-45b4-814c-2a64576c4a8b-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.623585 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bdkq\" (UniqueName: \"kubernetes.io/projected/3dd43996-4598-45b4-814c-2a64576c4a8b-kube-api-access-6bdkq\") on node \"crc\" DevicePath \"\""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.642119 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3dd43996-4598-45b4-814c-2a64576c4a8b" (UID: "3dd43996-4598-45b4-814c-2a64576c4a8b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.699225 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data" (OuterVolumeSpecName: "config-data") pod "3dd43996-4598-45b4-814c-2a64576c4a8b" (UID: "3dd43996-4598-45b4-814c-2a64576c4a8b"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.727079 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.727127 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd43996-4598-45b4-814c-2a64576c4a8b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.849362 4800 generic.go:334] "Generic (PLEG): container finished" podID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerID="3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe" exitCode=0 Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.849418 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3dd43996-4598-45b4-814c-2a64576c4a8b","Type":"ContainerDied","Data":"3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe"} Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.849457 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"3dd43996-4598-45b4-814c-2a64576c4a8b","Type":"ContainerDied","Data":"5d396207204f60bfbd36ad94088b57851e642036a27695a7f8f0ff248c88857f"} Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.849485 4800 scope.go:117] "RemoveContainer" containerID="25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.849685 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.895318 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.905900 4800 scope.go:117] "RemoveContainer" containerID="3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.917626 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.934978 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 16:14:03 crc kubenswrapper[4800]: E1125 16:14:03.935520 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="probe" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.935556 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="probe" Nov 25 16:14:03 crc kubenswrapper[4800]: E1125 16:14:03.935583 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="manila-scheduler" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.935590 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="manila-scheduler" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.935814 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="manila-scheduler" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.935859 4800 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" containerName="probe" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.937197 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.940667 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.959010 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.965271 4800 scope.go:117] "RemoveContainer" containerID="25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72" Nov 25 16:14:03 crc kubenswrapper[4800]: E1125 16:14:03.967128 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72\": container with ID starting with 25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72 not found: ID does not exist" containerID="25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.967175 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72"} err="failed to get container status \"25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72\": rpc error: code = NotFound desc = could not find container \"25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72\": container with ID starting with 25139dd5657fc060ae6d2e0ef2774fdd127b63433729047ee1404aecd7663a72 not found: ID does not exist" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.967200 4800 scope.go:117] "RemoveContainer" containerID="3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe" Nov 25 16:14:03 crc kubenswrapper[4800]: E1125 16:14:03.967510 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe\": container with ID starting with 3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe not found: ID does not exist" containerID="3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe" Nov 25 16:14:03 crc kubenswrapper[4800]: I1125 16:14:03.967531 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe"} err="failed to get container status \"3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe\": rpc error: code = NotFound desc = could not find container \"3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe\": container with ID starting with 3a599208f17bd80ed9b3055bf028cf27a6bc773e75641d3d0cf17581f994e6fe not found: ID does not exist" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.034902 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff783144-a701-4dd1-b275-89049f1e49d1-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.035002 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-config-data\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.035084 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-scripts\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.035188 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5879\" (UniqueName: \"kubernetes.io/projected/ff783144-a701-4dd1-b275-89049f1e49d1-kube-api-access-k5879\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.035220 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.035247 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.138350 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.138529 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff783144-a701-4dd1-b275-89049f1e49d1-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.138607 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-config-data\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.138700 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-scripts\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.138715 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff783144-a701-4dd1-b275-89049f1e49d1-etc-machine-id\") pod 
\"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.138741 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5879\" (UniqueName: \"kubernetes.io/projected/ff783144-a701-4dd1-b275-89049f1e49d1-kube-api-access-k5879\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.138886 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.145820 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.147890 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.152780 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-config-data\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.157481 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5879\" (UniqueName: \"kubernetes.io/projected/ff783144-a701-4dd1-b275-89049f1e49d1-kube-api-access-k5879\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.159767 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff783144-a701-4dd1-b275-89049f1e49d1-scripts\") pod \"manila-scheduler-0\" (UID: \"ff783144-a701-4dd1-b275-89049f1e49d1\") " pod="openstack/manila-scheduler-0" Nov 25 16:14:04 crc kubenswrapper[4800]: I1125 16:14:04.275518 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 16:14:05 crc kubenswrapper[4800]: I1125 16:14:05.662059 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 16:14:05 crc kubenswrapper[4800]: W1125 16:14:05.681235 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff783144_a701_4dd1_b275_89049f1e49d1.slice/crio-91cb0e589d92b205229f83643b4498232645af1236d6cbecc259e1bfbc07439d WatchSource:0}: Error finding container 91cb0e589d92b205229f83643b4498232645af1236d6cbecc259e1bfbc07439d: Status 404 returned error can't find the container with id 91cb0e589d92b205229f83643b4498232645af1236d6cbecc259e1bfbc07439d Nov 25 16:14:05 crc kubenswrapper[4800]: I1125 16:14:05.800104 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dd43996-4598-45b4-814c-2a64576c4a8b" path="/var/lib/kubelet/pods/3dd43996-4598-45b4-814c-2a64576c4a8b/volumes" Nov 25 16:14:05 crc kubenswrapper[4800]: I1125 16:14:05.875746 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ff783144-a701-4dd1-b275-89049f1e49d1","Type":"ContainerStarted","Data":"91cb0e589d92b205229f83643b4498232645af1236d6cbecc259e1bfbc07439d"} Nov 25 16:14:06 crc kubenswrapper[4800]: I1125 16:14:06.895402 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ff783144-a701-4dd1-b275-89049f1e49d1","Type":"ContainerStarted","Data":"f08149c916be9b3288930ac07d81b9b9833e888f08a8e2ff49c954277056c5c1"} Nov 25 16:14:06 crc kubenswrapper[4800]: I1125 16:14:06.896170 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ff783144-a701-4dd1-b275-89049f1e49d1","Type":"ContainerStarted","Data":"97aa20329ba1bf3b0e2cff0c379232747f1d7b5e75bedb7a954b40b8f492a25e"} Nov 25 16:14:06 crc kubenswrapper[4800]: I1125 16:14:06.931903 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.9318725839999997 podStartE2EDuration="3.931872584s" podCreationTimestamp="2025-11-25 16:14:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:14:06.92360049 +0000 UTC m=+3407.978008992" watchObservedRunningTime="2025-11-25 16:14:06.931872584 +0000 UTC m=+3407.986281066" Nov 25 16:14:07 crc kubenswrapper[4800]: I1125 16:14:07.575534 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 25 16:14:07 crc kubenswrapper[4800]: I1125 16:14:07.669251 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 16:14:07 crc kubenswrapper[4800]: I1125 16:14:07.905328 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="manila-share" containerID="cri-o://475a6c78dddffc4cbad18c8ea408a71f331d7782bdfe25bda95a16e9e1a9b311" gracePeriod=30 Nov 25 16:14:07 crc kubenswrapper[4800]: I1125 16:14:07.905421 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="probe" containerID="cri-o://5cc41b8662120e04b08d74a47070aa117d4aca3d09300fe9e4ca639e19d5c785" gracePeriod=30 Nov 25 16:14:08 crc 
kubenswrapper[4800]: I1125 16:14:08.921680 4800 generic.go:334] "Generic (PLEG): container finished" podID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerID="5cc41b8662120e04b08d74a47070aa117d4aca3d09300fe9e4ca639e19d5c785" exitCode=0 Nov 25 16:14:08 crc kubenswrapper[4800]: I1125 16:14:08.922065 4800 generic.go:334] "Generic (PLEG): container finished" podID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerID="475a6c78dddffc4cbad18c8ea408a71f331d7782bdfe25bda95a16e9e1a9b311" exitCode=1 Nov 25 16:14:08 crc kubenswrapper[4800]: I1125 16:14:08.922090 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"988ef021-f985-428e-a4e2-1ef4cbeb4438","Type":"ContainerDied","Data":"5cc41b8662120e04b08d74a47070aa117d4aca3d09300fe9e4ca639e19d5c785"} Nov 25 16:14:08 crc kubenswrapper[4800]: I1125 16:14:08.922119 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"988ef021-f985-428e-a4e2-1ef4cbeb4438","Type":"ContainerDied","Data":"475a6c78dddffc4cbad18c8ea408a71f331d7782bdfe25bda95a16e9e1a9b311"} Nov 25 16:14:08 crc kubenswrapper[4800]: I1125 16:14:08.922129 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"988ef021-f985-428e-a4e2-1ef4cbeb4438","Type":"ContainerDied","Data":"1a3af295c98a71fa641ae7ce640c8d0ff6cd050538b662274d561a4bd34c0b7c"} Nov 25 16:14:08 crc kubenswrapper[4800]: I1125 16:14:08.922139 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a3af295c98a71fa641ae7ce640c8d0ff6cd050538b662274d561a4bd34c0b7c" Nov 25 16:14:08 crc kubenswrapper[4800]: I1125 16:14:08.991033 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.067401 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-ceph\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.067987 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wzcw\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-kube-api-access-6wzcw\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.068129 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-etc-machine-id\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.068168 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-combined-ca-bundle\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.068252 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-var-lib-manila\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: 
\"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.068492 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data-custom\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.068539 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.068631 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-scripts\") pod \"988ef021-f985-428e-a4e2-1ef4cbeb4438\" (UID: \"988ef021-f985-428e-a4e2-1ef4cbeb4438\") " Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.069126 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.069326 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.070064 4800 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.070098 4800 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/988ef021-f985-428e-a4e2-1ef4cbeb4438-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.079753 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-scripts" (OuterVolumeSpecName: "scripts") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.084455 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-ceph" (OuterVolumeSpecName: "ceph") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.085745 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-kube-api-access-6wzcw" (OuterVolumeSpecName: "kube-api-access-6wzcw") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "kube-api-access-6wzcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.098014 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.172425 4800 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.172466 4800 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.172477 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.172485 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wzcw\" (UniqueName: \"kubernetes.io/projected/988ef021-f985-428e-a4e2-1ef4cbeb4438-kube-api-access-6wzcw\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.178899 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.191249 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data" (OuterVolumeSpecName: "config-data") pod "988ef021-f985-428e-a4e2-1ef4cbeb4438" (UID: "988ef021-f985-428e-a4e2-1ef4cbeb4438"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.275069 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.275112 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/988ef021-f985-428e-a4e2-1ef4cbeb4438-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.932000 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.971097 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.984824 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.997819 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 16:14:09 crc kubenswrapper[4800]: E1125 16:14:09.998356 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="manila-share" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.998378 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="manila-share" Nov 25 16:14:09 crc kubenswrapper[4800]: E1125 16:14:09.998410 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="probe" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.998417 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="probe" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.998613 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="manila-share" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.998648 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" containerName="probe" Nov 25 16:14:09 crc kubenswrapper[4800]: I1125 16:14:09.999708 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.004948 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.012710 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.099535 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-config-data\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.100741 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/c50c89bb-6472-4178-a4db-32b109ae9847-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.101064 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c50c89bb-6472-4178-a4db-32b109ae9847-ceph\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.101101 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-scripts\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.101227 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.101625 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.101702 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56hss\" (UniqueName: \"kubernetes.io/projected/c50c89bb-6472-4178-a4db-32b109ae9847-kube-api-access-56hss\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.101943 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c50c89bb-6472-4178-a4db-32b109ae9847-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc 
kubenswrapper[4800]: I1125 16:14:10.204011 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204077 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56hss\" (UniqueName: \"kubernetes.io/projected/c50c89bb-6472-4178-a4db-32b109ae9847-kube-api-access-56hss\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204107 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c50c89bb-6472-4178-a4db-32b109ae9847-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204207 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-config-data\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204232 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/c50c89bb-6472-4178-a4db-32b109ae9847-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204279 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c50c89bb-6472-4178-a4db-32b109ae9847-ceph\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204313 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-scripts\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204402 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204397 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c50c89bb-6472-4178-a4db-32b109ae9847-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.204543 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: 
\"kubernetes.io/host-path/c50c89bb-6472-4178-a4db-32b109ae9847-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.211539 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.211612 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.212208 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-scripts\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.216432 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c50c89bb-6472-4178-a4db-32b109ae9847-ceph\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.217724 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c50c89bb-6472-4178-a4db-32b109ae9847-config-data\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.223377 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56hss\" (UniqueName: \"kubernetes.io/projected/c50c89bb-6472-4178-a4db-32b109ae9847-kube-api-access-56hss\") pod \"manila-share-share1-0\" (UID: \"c50c89bb-6472-4178-a4db-32b109ae9847\") " pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.390023 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 16:14:10 crc kubenswrapper[4800]: I1125 16:14:10.979013 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 16:14:10 crc kubenswrapper[4800]: W1125 16:14:10.980654 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc50c89bb_6472_4178_a4db_32b109ae9847.slice/crio-133eba23581497953fc7ac88eacc35175bb679d9716c3a377cf6bcd9bfbe2a68 WatchSource:0}: Error finding container 133eba23581497953fc7ac88eacc35175bb679d9716c3a377cf6bcd9bfbe2a68: Status 404 returned error can't find the container with id 133eba23581497953fc7ac88eacc35175bb679d9716c3a377cf6bcd9bfbe2a68 Nov 25 16:14:11 crc kubenswrapper[4800]: I1125 16:14:11.805888 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="988ef021-f985-428e-a4e2-1ef4cbeb4438" path="/var/lib/kubelet/pods/988ef021-f985-428e-a4e2-1ef4cbeb4438/volumes" Nov 25 16:14:11 crc kubenswrapper[4800]: I1125 16:14:11.961294 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"c50c89bb-6472-4178-a4db-32b109ae9847","Type":"ContainerStarted","Data":"3ae2e9b3c5c05111c124acec5958ef7cd82a4fb4beabea72a731c6f05f03465c"} Nov 25 16:14:11 crc kubenswrapper[4800]: I1125 16:14:11.961357 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"c50c89bb-6472-4178-a4db-32b109ae9847","Type":"ContainerStarted","Data":"133eba23581497953fc7ac88eacc35175bb679d9716c3a377cf6bcd9bfbe2a68"} Nov 25 16:14:12 crc kubenswrapper[4800]: I1125 16:14:12.979515 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"c50c89bb-6472-4178-a4db-32b109ae9847","Type":"ContainerStarted","Data":"9bffdbeb18b4855993de62d97290c60a819b11e2e19fcf778c39353a957a2142"} Nov 25 16:14:13 crc kubenswrapper[4800]: I1125 16:14:13.021814 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=4.021775806 podStartE2EDuration="4.021775806s" podCreationTimestamp="2025-11-25 16:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:14:13.004487987 +0000 UTC m=+3414.058896479" watchObservedRunningTime="2025-11-25 16:14:13.021775806 +0000 UTC m=+3414.076184328" Nov 25 16:14:14 crc kubenswrapper[4800]: I1125 16:14:14.276660 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 25 16:14:20 crc kubenswrapper[4800]: I1125 16:14:20.390459 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 16:14:26 crc kubenswrapper[4800]: I1125 16:14:26.086621 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 25 16:14:27 crc kubenswrapper[4800]: I1125 16:14:27.177333 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 16:14:32 crc kubenswrapper[4800]: I1125 16:14:32.146636 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.161613 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk"] 
Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.165286 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.169809 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.174733 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk"] Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.184354 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.237905 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-config-volume\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.238144 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6h89\" (UniqueName: \"kubernetes.io/projected/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-kube-api-access-k6h89\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.238226 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-secret-volume\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.340578 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-config-volume\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.340676 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6h89\" (UniqueName: \"kubernetes.io/projected/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-kube-api-access-k6h89\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.340702 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-secret-volume\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.342276 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-config-volume\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.350981 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-secret-volume\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.361434 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6h89\" (UniqueName: \"kubernetes.io/projected/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-kube-api-access-k6h89\") pod \"collect-profiles-29401455-7grwk\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:00 crc kubenswrapper[4800]: I1125 16:15:00.501823 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:01 crc kubenswrapper[4800]: I1125 16:15:01.010598 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk"] Nov 25 16:15:01 crc kubenswrapper[4800]: I1125 16:15:01.587800 4800 generic.go:334] "Generic (PLEG): container finished" podID="d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" containerID="53f53b0cdcf5e13fb81c3a43e9b3598c65cf827cdd95d6a6eb3e2a17c5b2a7ca" exitCode=0 Nov 25 16:15:01 crc kubenswrapper[4800]: I1125 16:15:01.587882 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" event={"ID":"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b","Type":"ContainerDied","Data":"53f53b0cdcf5e13fb81c3a43e9b3598c65cf827cdd95d6a6eb3e2a17c5b2a7ca"} Nov 25 16:15:01 crc kubenswrapper[4800]: I1125 16:15:01.588243 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" event={"ID":"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b","Type":"ContainerStarted","Data":"c3dbe4858a4e110f800ee282eddf647343639c28ad27b9e06e7c22379fd50487"} Nov 25 16:15:02 crc kubenswrapper[4800]: I1125 16:15:02.994354 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.109233 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-secret-volume\") pod \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.110325 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6h89\" (UniqueName: \"kubernetes.io/projected/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-kube-api-access-k6h89\") pod \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.110519 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-config-volume\") pod \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\" (UID: \"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b\") " Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.111202 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-config-volume" (OuterVolumeSpecName: "config-volume") pod "d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" (UID: "d81d0a97-3e2c-40a6-ba50-9b7798e7c05b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.111932 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.118115 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-kube-api-access-k6h89" (OuterVolumeSpecName: "kube-api-access-k6h89") pod "d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" (UID: "d81d0a97-3e2c-40a6-ba50-9b7798e7c05b"). InnerVolumeSpecName "kube-api-access-k6h89". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.121294 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" (UID: "d81d0a97-3e2c-40a6-ba50-9b7798e7c05b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.214563 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.214634 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6h89\" (UniqueName: \"kubernetes.io/projected/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b-kube-api-access-k6h89\") on node \"crc\" DevicePath \"\"" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.615511 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" event={"ID":"d81d0a97-3e2c-40a6-ba50-9b7798e7c05b","Type":"ContainerDied","Data":"c3dbe4858a4e110f800ee282eddf647343639c28ad27b9e06e7c22379fd50487"} Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.615569 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3dbe4858a4e110f800ee282eddf647343639c28ad27b9e06e7c22379fd50487" Nov 25 16:15:03 crc kubenswrapper[4800]: I1125 16:15:03.615625 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk" Nov 25 16:15:04 crc kubenswrapper[4800]: I1125 16:15:04.081428 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8"] Nov 25 16:15:04 crc kubenswrapper[4800]: I1125 16:15:04.090490 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401410-nwkd8"] Nov 25 16:15:05 crc kubenswrapper[4800]: I1125 16:15:05.806269 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80c53442-63bf-4ab5-815a-cf84b18e3464" path="/var/lib/kubelet/pods/80c53442-63bf-4ab5-815a-cf84b18e3464/volumes" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.316409 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dgxsd"] Nov 25 16:15:13 crc kubenswrapper[4800]: E1125 16:15:13.317649 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" containerName="collect-profiles" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.317664 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" containerName="collect-profiles" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.317905 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" containerName="collect-profiles" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.320022 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.343138 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dgxsd"] Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.390650 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2kqf\" (UniqueName: \"kubernetes.io/projected/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-kube-api-access-p2kqf\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.391349 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-catalog-content\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.391455 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-utilities\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.494487 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-catalog-content\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.494560 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-utilities\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.494667 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2kqf\" (UniqueName: \"kubernetes.io/projected/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-kube-api-access-p2kqf\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.495649 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-catalog-content\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.495890 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-utilities\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.532212 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p2kqf\" (UniqueName: \"kubernetes.io/projected/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-kube-api-access-p2kqf\") pod \"certified-operators-dgxsd\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:13 crc kubenswrapper[4800]: I1125 16:15:13.644609 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:14 crc kubenswrapper[4800]: I1125 16:15:14.214068 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dgxsd"] Nov 25 16:15:14 crc kubenswrapper[4800]: I1125 16:15:14.750179 4800 generic.go:334] "Generic (PLEG): container finished" podID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerID="a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447" exitCode=0 Nov 25 16:15:14 crc kubenswrapper[4800]: I1125 16:15:14.750234 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgxsd" event={"ID":"8b23ac64-f4da-4ca5-be30-c51b1b53cae7","Type":"ContainerDied","Data":"a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447"} Nov 25 16:15:14 crc kubenswrapper[4800]: I1125 16:15:14.750604 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgxsd" event={"ID":"8b23ac64-f4da-4ca5-be30-c51b1b53cae7","Type":"ContainerStarted","Data":"974dba71cdae03d021e23b0b25723cae31409b1d29c29b510da9007e86fce9a0"} Nov 25 16:15:14 crc kubenswrapper[4800]: I1125 16:15:14.752739 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:15:15 crc kubenswrapper[4800]: I1125 16:15:15.765294 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgxsd" event={"ID":"8b23ac64-f4da-4ca5-be30-c51b1b53cae7","Type":"ContainerStarted","Data":"e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be"} Nov 25 16:15:16 crc kubenswrapper[4800]: I1125 16:15:16.780167 4800 generic.go:334] "Generic (PLEG): container finished" podID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerID="e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be" exitCode=0 Nov 25 16:15:16 crc kubenswrapper[4800]: I1125 16:15:16.780347 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgxsd" event={"ID":"8b23ac64-f4da-4ca5-be30-c51b1b53cae7","Type":"ContainerDied","Data":"e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be"} Nov 25 16:15:17 crc kubenswrapper[4800]: I1125 16:15:17.801363 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgxsd" event={"ID":"8b23ac64-f4da-4ca5-be30-c51b1b53cae7","Type":"ContainerStarted","Data":"0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076"} Nov 25 16:15:17 crc kubenswrapper[4800]: I1125 16:15:17.826364 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dgxsd" podStartSLOduration=2.380506907 podStartE2EDuration="4.826339212s" podCreationTimestamp="2025-11-25 16:15:13 +0000 UTC" firstStartedPulling="2025-11-25 16:15:14.752505032 +0000 UTC m=+3475.806913514" lastFinishedPulling="2025-11-25 16:15:17.198337307 +0000 UTC m=+3478.252745819" observedRunningTime="2025-11-25 16:15:17.81924766 +0000 UTC m=+3478.873656142" watchObservedRunningTime="2025-11-25 
16:15:17.826339212 +0000 UTC m=+3478.880747684" Nov 25 16:15:23 crc kubenswrapper[4800]: I1125 16:15:23.644759 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:23 crc kubenswrapper[4800]: I1125 16:15:23.645580 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:23 crc kubenswrapper[4800]: I1125 16:15:23.745218 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:23 crc kubenswrapper[4800]: I1125 16:15:23.959081 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:24 crc kubenswrapper[4800]: I1125 16:15:24.016874 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dgxsd"] Nov 25 16:15:25 crc kubenswrapper[4800]: I1125 16:15:25.911923 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dgxsd" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="registry-server" containerID="cri-o://0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076" gracePeriod=2 Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.434711 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.618153 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-catalog-content\") pod \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.618377 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2kqf\" (UniqueName: \"kubernetes.io/projected/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-kube-api-access-p2kqf\") pod \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.618580 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-utilities\") pod \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\" (UID: \"8b23ac64-f4da-4ca5-be30-c51b1b53cae7\") " Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.619971 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-utilities" (OuterVolumeSpecName: "utilities") pod "8b23ac64-f4da-4ca5-be30-c51b1b53cae7" (UID: "8b23ac64-f4da-4ca5-be30-c51b1b53cae7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.626084 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-kube-api-access-p2kqf" (OuterVolumeSpecName: "kube-api-access-p2kqf") pod "8b23ac64-f4da-4ca5-be30-c51b1b53cae7" (UID: "8b23ac64-f4da-4ca5-be30-c51b1b53cae7"). InnerVolumeSpecName "kube-api-access-p2kqf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.674808 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8b23ac64-f4da-4ca5-be30-c51b1b53cae7" (UID: "8b23ac64-f4da-4ca5-be30-c51b1b53cae7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.721131 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2kqf\" (UniqueName: \"kubernetes.io/projected/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-kube-api-access-p2kqf\") on node \"crc\" DevicePath \"\"" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.721167 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.721177 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b23ac64-f4da-4ca5-be30-c51b1b53cae7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.927182 4800 generic.go:334] "Generic (PLEG): container finished" podID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerID="0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076" exitCode=0 Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.927229 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgxsd" event={"ID":"8b23ac64-f4da-4ca5-be30-c51b1b53cae7","Type":"ContainerDied","Data":"0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076"} Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.927299 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgxsd" event={"ID":"8b23ac64-f4da-4ca5-be30-c51b1b53cae7","Type":"ContainerDied","Data":"974dba71cdae03d021e23b0b25723cae31409b1d29c29b510da9007e86fce9a0"} Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.927320 4800 scope.go:117] "RemoveContainer" containerID="0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.927464 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dgxsd" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.962960 4800 scope.go:117] "RemoveContainer" containerID="e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be" Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.966430 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dgxsd"] Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.978985 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dgxsd"] Nov 25 16:15:26 crc kubenswrapper[4800]: I1125 16:15:26.988082 4800 scope.go:117] "RemoveContainer" containerID="a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447" Nov 25 16:15:27 crc kubenswrapper[4800]: I1125 16:15:27.044597 4800 scope.go:117] "RemoveContainer" containerID="0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076" Nov 25 16:15:27 crc kubenswrapper[4800]: E1125 16:15:27.045410 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076\": container with ID starting with 0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076 not found: ID does not exist" containerID="0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076" Nov 25 16:15:27 crc kubenswrapper[4800]: I1125 16:15:27.045481 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076"} err="failed to get container status \"0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076\": rpc error: code = NotFound desc = could not find container \"0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076\": container with ID starting with 0b15dae36edc11e0cf913cc3d23c2e44cb8d3c65a0b5d1f236d83b8adc927076 not found: ID does not exist" Nov 25 16:15:27 crc kubenswrapper[4800]: I1125 16:15:27.045517 4800 scope.go:117] "RemoveContainer" containerID="e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be" Nov 25 16:15:27 crc kubenswrapper[4800]: E1125 16:15:27.046005 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be\": container with ID starting with e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be not found: ID does not exist" containerID="e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be" Nov 25 16:15:27 crc kubenswrapper[4800]: I1125 16:15:27.046061 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be"} err="failed to get container status \"e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be\": rpc error: code = NotFound desc = could not find container \"e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be\": container with ID starting with e613342588187ec1bf772f1260805a8ac92a314ea84c41ee2087d8e04cdc10be not found: ID does not exist" Nov 25 16:15:27 crc kubenswrapper[4800]: I1125 16:15:27.046106 4800 scope.go:117] "RemoveContainer" containerID="a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447" Nov 25 16:15:27 crc kubenswrapper[4800]: E1125 16:15:27.046809 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447\": container with ID starting with a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447 not found: ID does not exist" containerID="a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447" Nov 25 16:15:27 crc kubenswrapper[4800]: I1125 16:15:27.046863 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447"} err="failed to get container status \"a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447\": rpc error: code = NotFound desc = could not find container \"a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447\": container with ID starting with a99cdec9483ee54c6ff279bd428968697336b29af265e8f48b7b8d05cdc70447 not found: ID does not exist" Nov 25 16:15:27 crc kubenswrapper[4800]: I1125 16:15:27.802576 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" path="/var/lib/kubelet/pods/8b23ac64-f4da-4ca5-be30-c51b1b53cae7/volumes" Nov 25 16:15:28 crc kubenswrapper[4800]: I1125 16:15:28.939886 4800 scope.go:117] "RemoveContainer" containerID="6c9f10cba91f358845ee3cf2ed9103a68565cf8220f2e0b13efce486fdf0e134" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.755614 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9"] Nov 25 16:15:29 crc kubenswrapper[4800]: E1125 16:15:29.756594 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="registry-server" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.756612 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="registry-server" Nov 25 16:15:29 crc kubenswrapper[4800]: E1125 16:15:29.756640 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="extract-content" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.756648 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="extract-content" Nov 25 16:15:29 crc kubenswrapper[4800]: E1125 16:15:29.756669 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="extract-utilities" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.756675 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="extract-utilities" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.757329 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b23ac64-f4da-4ca5-be30-c51b1b53cae7" containerName="registry-server" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.758527 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.807930 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9"] Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.896879 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tslb4\" (UniqueName: \"kubernetes.io/projected/af1e8320-14a4-4a15-b29c-39a09ce9bfb9-kube-api-access-tslb4\") pod \"openstack-operator-controller-operator-5ff895d7c-qbck9\" (UID: \"af1e8320-14a4-4a15-b29c-39a09ce9bfb9\") " pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" Nov 25 16:15:29 crc kubenswrapper[4800]: I1125 16:15:29.999336 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tslb4\" (UniqueName: \"kubernetes.io/projected/af1e8320-14a4-4a15-b29c-39a09ce9bfb9-kube-api-access-tslb4\") pod \"openstack-operator-controller-operator-5ff895d7c-qbck9\" (UID: \"af1e8320-14a4-4a15-b29c-39a09ce9bfb9\") " pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" Nov 25 16:15:30 crc kubenswrapper[4800]: I1125 16:15:30.020503 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tslb4\" (UniqueName: \"kubernetes.io/projected/af1e8320-14a4-4a15-b29c-39a09ce9bfb9-kube-api-access-tslb4\") pod \"openstack-operator-controller-operator-5ff895d7c-qbck9\" (UID: \"af1e8320-14a4-4a15-b29c-39a09ce9bfb9\") " pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" Nov 25 16:15:30 crc kubenswrapper[4800]: I1125 16:15:30.083028 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" Nov 25 16:15:30 crc kubenswrapper[4800]: I1125 16:15:30.571175 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9"] Nov 25 16:15:30 crc kubenswrapper[4800]: I1125 16:15:30.973033 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" event={"ID":"af1e8320-14a4-4a15-b29c-39a09ce9bfb9","Type":"ContainerStarted","Data":"1e57390b3314ff1107b81586eea474fcabf3dc74881cf7a21febc718391c024d"} Nov 25 16:15:30 crc kubenswrapper[4800]: I1125 16:15:30.973766 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" Nov 25 16:15:30 crc kubenswrapper[4800]: I1125 16:15:30.973790 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" event={"ID":"af1e8320-14a4-4a15-b29c-39a09ce9bfb9","Type":"ContainerStarted","Data":"d2bacb91da2a4f264f04e2e84158decb2b4087635e89c99a8abe54985c5e35e8"} Nov 25 16:15:31 crc kubenswrapper[4800]: I1125 16:15:31.009690 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" podStartSLOduration=2.009664065 podStartE2EDuration="2.009664065s" podCreationTimestamp="2025-11-25 16:15:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:15:31.008083423 +0000 UTC m=+3492.062491925" watchObservedRunningTime="2025-11-25 16:15:31.009664065 +0000 UTC m=+3492.064072547" Nov 25 16:15:40 crc kubenswrapper[4800]: I1125 16:15:40.087558 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5ff895d7c-qbck9" Nov 25 16:15:40 crc kubenswrapper[4800]: I1125 16:15:40.183702 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf"] Nov 25 16:15:40 crc kubenswrapper[4800]: I1125 16:15:40.184173 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" podUID="a8c767e3-c848-47bd-b73c-a48f11f634db" containerName="operator" containerID="cri-o://d5e6e9dcbde9b34b5c773a3d7dc0456d7889f3d6d50c3f3f2e835a716edfb1ab" gracePeriod=10 Nov 25 16:15:41 crc kubenswrapper[4800]: I1125 16:15:41.070754 4800 generic.go:334] "Generic (PLEG): container finished" podID="a8c767e3-c848-47bd-b73c-a48f11f634db" containerID="d5e6e9dcbde9b34b5c773a3d7dc0456d7889f3d6d50c3f3f2e835a716edfb1ab" exitCode=0 Nov 25 16:15:41 crc kubenswrapper[4800]: I1125 16:15:41.070819 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" event={"ID":"a8c767e3-c848-47bd-b73c-a48f11f634db","Type":"ContainerDied","Data":"d5e6e9dcbde9b34b5c773a3d7dc0456d7889f3d6d50c3f3f2e835a716edfb1ab"} Nov 25 16:15:41 crc kubenswrapper[4800]: I1125 16:15:41.188806 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 16:15:41 crc kubenswrapper[4800]: I1125 16:15:41.294361 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bjjh\" (UniqueName: \"kubernetes.io/projected/a8c767e3-c848-47bd-b73c-a48f11f634db-kube-api-access-5bjjh\") pod \"a8c767e3-c848-47bd-b73c-a48f11f634db\" (UID: \"a8c767e3-c848-47bd-b73c-a48f11f634db\") " Nov 25 16:15:41 crc kubenswrapper[4800]: I1125 16:15:41.303353 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c767e3-c848-47bd-b73c-a48f11f634db-kube-api-access-5bjjh" (OuterVolumeSpecName: "kube-api-access-5bjjh") pod "a8c767e3-c848-47bd-b73c-a48f11f634db" (UID: "a8c767e3-c848-47bd-b73c-a48f11f634db"). InnerVolumeSpecName "kube-api-access-5bjjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:15:41 crc kubenswrapper[4800]: I1125 16:15:41.397031 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bjjh\" (UniqueName: \"kubernetes.io/projected/a8c767e3-c848-47bd-b73c-a48f11f634db-kube-api-access-5bjjh\") on node \"crc\" DevicePath \"\"" Nov 25 16:15:42 crc kubenswrapper[4800]: I1125 16:15:42.086740 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" event={"ID":"a8c767e3-c848-47bd-b73c-a48f11f634db","Type":"ContainerDied","Data":"57dc102c61a91d138789712942747cad402900f2b556bbb62e30c4670a4b8c82"} Nov 25 16:15:42 crc kubenswrapper[4800]: I1125 16:15:42.086887 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf" Nov 25 16:15:42 crc kubenswrapper[4800]: I1125 16:15:42.087281 4800 scope.go:117] "RemoveContainer" containerID="d5e6e9dcbde9b34b5c773a3d7dc0456d7889f3d6d50c3f3f2e835a716edfb1ab" Nov 25 16:15:42 crc kubenswrapper[4800]: I1125 16:15:42.122751 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf"] Nov 25 16:15:42 crc kubenswrapper[4800]: I1125 16:15:42.131110 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-2vtsf"] Nov 25 16:15:43 crc kubenswrapper[4800]: I1125 16:15:43.798373 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c767e3-c848-47bd-b73c-a48f11f634db" path="/var/lib/kubelet/pods/a8c767e3-c848-47bd-b73c-a48f11f634db/volumes" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.091865 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd"] Nov 25 16:16:11 crc kubenswrapper[4800]: E1125 16:16:11.093474 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c767e3-c848-47bd-b73c-a48f11f634db" containerName="operator" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.093496 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c767e3-c848-47bd-b73c-a48f11f634db" containerName="operator" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.093794 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c767e3-c848-47bd-b73c-a48f11f634db" containerName="operator" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.095725 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.109770 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd"] Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.279656 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdjf9\" (UniqueName: \"kubernetes.io/projected/8394c97f-b95f-41cd-8baa-b6bdb3a2219a-kube-api-access-bdjf9\") pod \"test-operator-controller-manager-69c4569b4-2wcmd\" (UID: \"8394c97f-b95f-41cd-8baa-b6bdb3a2219a\") " pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.382408 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdjf9\" (UniqueName: \"kubernetes.io/projected/8394c97f-b95f-41cd-8baa-b6bdb3a2219a-kube-api-access-bdjf9\") pod \"test-operator-controller-manager-69c4569b4-2wcmd\" (UID: \"8394c97f-b95f-41cd-8baa-b6bdb3a2219a\") " pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.412279 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdjf9\" (UniqueName: \"kubernetes.io/projected/8394c97f-b95f-41cd-8baa-b6bdb3a2219a-kube-api-access-bdjf9\") pod \"test-operator-controller-manager-69c4569b4-2wcmd\" (UID: \"8394c97f-b95f-41cd-8baa-b6bdb3a2219a\") " pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" Nov 25 16:16:11 crc kubenswrapper[4800]: I1125 16:16:11.502156 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" Nov 25 16:16:12 crc kubenswrapper[4800]: I1125 16:16:12.066444 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd"] Nov 25 16:16:12 crc kubenswrapper[4800]: I1125 16:16:12.422651 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" event={"ID":"8394c97f-b95f-41cd-8baa-b6bdb3a2219a","Type":"ContainerStarted","Data":"10a686b382260be5a1143755bf358cbe623710eaee06b7287149add4f1f2ddb6"} Nov 25 16:16:12 crc kubenswrapper[4800]: I1125 16:16:12.640607 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:16:12 crc kubenswrapper[4800]: I1125 16:16:12.640701 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:16:15 crc kubenswrapper[4800]: I1125 16:16:15.466652 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" event={"ID":"8394c97f-b95f-41cd-8baa-b6bdb3a2219a","Type":"ContainerStarted","Data":"da0eb78d84afd55b760e0f0365b90539645968ebfa0ee29ffe415ae177563ce4"} Nov 25 16:16:16 crc kubenswrapper[4800]: 
I1125 16:16:16.480102 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" event={"ID":"8394c97f-b95f-41cd-8baa-b6bdb3a2219a","Type":"ContainerStarted","Data":"6d5dc8866d0b8570b5845d1078f8a0cf94e168c159adb5b99f57ae32ca8a306b"} Nov 25 16:16:16 crc kubenswrapper[4800]: I1125 16:16:16.480566 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" Nov 25 16:16:16 crc kubenswrapper[4800]: I1125 16:16:16.508906 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" podStartSLOduration=2.441089972 podStartE2EDuration="5.508869329s" podCreationTimestamp="2025-11-25 16:16:11 +0000 UTC" firstStartedPulling="2025-11-25 16:16:12.08375201 +0000 UTC m=+3533.138160492" lastFinishedPulling="2025-11-25 16:16:15.151531367 +0000 UTC m=+3536.205939849" observedRunningTime="2025-11-25 16:16:16.497585351 +0000 UTC m=+3537.551993833" watchObservedRunningTime="2025-11-25 16:16:16.508869329 +0000 UTC m=+3537.563277811" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.351174 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b7pxg"] Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.353633 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.373641 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7pxg"] Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.463520 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w88d6\" (UniqueName: \"kubernetes.io/projected/b36846d7-cb41-4c63-8bd9-7d728cae0459-kube-api-access-w88d6\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.463595 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-utilities\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.463691 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-catalog-content\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.566305 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w88d6\" (UniqueName: \"kubernetes.io/projected/b36846d7-cb41-4c63-8bd9-7d728cae0459-kube-api-access-w88d6\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.566725 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-utilities\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.566984 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-catalog-content\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.567486 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-utilities\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.567572 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-catalog-content\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.591267 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w88d6\" (UniqueName: \"kubernetes.io/projected/b36846d7-cb41-4c63-8bd9-7d728cae0459-kube-api-access-w88d6\") pod \"redhat-marketplace-b7pxg\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:18 crc kubenswrapper[4800]: I1125 16:16:18.676685 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:19 crc kubenswrapper[4800]: I1125 16:16:19.200412 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7pxg"] Nov 25 16:16:19 crc kubenswrapper[4800]: W1125 16:16:19.204063 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb36846d7_cb41_4c63_8bd9_7d728cae0459.slice/crio-d60e44a6088bd8d112e0670474109d12c1440b9fb9126ebddb22798096942f70 WatchSource:0}: Error finding container d60e44a6088bd8d112e0670474109d12c1440b9fb9126ebddb22798096942f70: Status 404 returned error can't find the container with id d60e44a6088bd8d112e0670474109d12c1440b9fb9126ebddb22798096942f70 Nov 25 16:16:19 crc kubenswrapper[4800]: I1125 16:16:19.512442 4800 generic.go:334] "Generic (PLEG): container finished" podID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerID="a9dafb9c65fb6a72288eecd6567bf57a1812a3c5bc34bc618b71523ebdc74ad3" exitCode=0 Nov 25 16:16:19 crc kubenswrapper[4800]: I1125 16:16:19.512512 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7pxg" event={"ID":"b36846d7-cb41-4c63-8bd9-7d728cae0459","Type":"ContainerDied","Data":"a9dafb9c65fb6a72288eecd6567bf57a1812a3c5bc34bc618b71523ebdc74ad3"} Nov 25 16:16:19 crc kubenswrapper[4800]: I1125 16:16:19.512551 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7pxg" event={"ID":"b36846d7-cb41-4c63-8bd9-7d728cae0459","Type":"ContainerStarted","Data":"d60e44a6088bd8d112e0670474109d12c1440b9fb9126ebddb22798096942f70"} Nov 25 16:16:20 crc kubenswrapper[4800]: I1125 16:16:20.530861 4800 generic.go:334] "Generic (PLEG): container finished" podID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerID="a5a80765790afb2e60b9ce50864c02ca969a70a472bf863449680b2a6cd48a51" exitCode=0 Nov 25 16:16:20 crc kubenswrapper[4800]: I1125 16:16:20.531552 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7pxg" event={"ID":"b36846d7-cb41-4c63-8bd9-7d728cae0459","Type":"ContainerDied","Data":"a5a80765790afb2e60b9ce50864c02ca969a70a472bf863449680b2a6cd48a51"} Nov 25 16:16:21 crc kubenswrapper[4800]: I1125 16:16:21.506700 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69c4569b4-2wcmd" Nov 25 16:16:21 crc kubenswrapper[4800]: I1125 16:16:21.558750 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7pxg" event={"ID":"b36846d7-cb41-4c63-8bd9-7d728cae0459","Type":"ContainerStarted","Data":"87a0add99310f0182edb4cd0d6b7b60e13047889b299d05d8a4215fa31aa3f43"} Nov 25 16:16:21 crc kubenswrapper[4800]: I1125 16:16:21.585642 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v"] Nov 25 16:16:21 crc kubenswrapper[4800]: I1125 16:16:21.586104 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="manager" containerID="cri-o://049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb" gracePeriod=10 Nov 25 16:16:21 crc kubenswrapper[4800]: I1125 16:16:21.586246 4800 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="kube-rbac-proxy" containerID="cri-o://1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1" gracePeriod=10 Nov 25 16:16:21 crc kubenswrapper[4800]: I1125 16:16:21.601199 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b7pxg" podStartSLOduration=2.147456578 podStartE2EDuration="3.601171855s" podCreationTimestamp="2025-11-25 16:16:18 +0000 UTC" firstStartedPulling="2025-11-25 16:16:19.514566998 +0000 UTC m=+3540.568975480" lastFinishedPulling="2025-11-25 16:16:20.968282275 +0000 UTC m=+3542.022690757" observedRunningTime="2025-11-25 16:16:21.599299974 +0000 UTC m=+3542.653708476" watchObservedRunningTime="2025-11-25 16:16:21.601171855 +0000 UTC m=+3542.655580337" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.114329 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.259647 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9nn7\" (UniqueName: \"kubernetes.io/projected/51bc0d61-40c5-404a-978c-414717c8e3e9-kube-api-access-h9nn7\") pod \"51bc0d61-40c5-404a-978c-414717c8e3e9\" (UID: \"51bc0d61-40c5-404a-978c-414717c8e3e9\") " Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.271177 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51bc0d61-40c5-404a-978c-414717c8e3e9-kube-api-access-h9nn7" (OuterVolumeSpecName: "kube-api-access-h9nn7") pod "51bc0d61-40c5-404a-978c-414717c8e3e9" (UID: "51bc0d61-40c5-404a-978c-414717c8e3e9"). InnerVolumeSpecName "kube-api-access-h9nn7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.363962 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9nn7\" (UniqueName: \"kubernetes.io/projected/51bc0d61-40c5-404a-978c-414717c8e3e9-kube-api-access-h9nn7\") on node \"crc\" DevicePath \"\"" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.570798 4800 generic.go:334] "Generic (PLEG): container finished" podID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerID="1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1" exitCode=0 Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.571238 4800 generic.go:334] "Generic (PLEG): container finished" podID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerID="049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb" exitCode=0 Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.570895 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" event={"ID":"51bc0d61-40c5-404a-978c-414717c8e3e9","Type":"ContainerDied","Data":"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1"} Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.570943 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.571883 4800 scope.go:117] "RemoveContainer" containerID="1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.571827 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" event={"ID":"51bc0d61-40c5-404a-978c-414717c8e3e9","Type":"ContainerDied","Data":"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb"} Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.572129 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v" event={"ID":"51bc0d61-40c5-404a-978c-414717c8e3e9","Type":"ContainerDied","Data":"bed86b3816295893c4739fcfa2153269f661c1382e3b0690817f8a5cf0c33eba"} Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.610645 4800 scope.go:117] "RemoveContainer" containerID="049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.614071 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v"] Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.624047 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-kxm4v"] Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.631232 4800 scope.go:117] "RemoveContainer" containerID="1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1" Nov 25 16:16:22 crc kubenswrapper[4800]: E1125 16:16:22.631901 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1\": container with ID starting with 1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1 not found: ID does not exist" containerID="1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.631948 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1"} err="failed to get container status \"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1\": rpc error: code = NotFound desc = could not find container \"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1\": container with ID starting with 1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1 not found: ID does not exist" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.631972 4800 scope.go:117] "RemoveContainer" containerID="049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb" Nov 25 16:16:22 crc kubenswrapper[4800]: E1125 16:16:22.632373 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb\": container with ID starting with 049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb not found: ID does not exist" containerID="049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.632404 4800 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb"} err="failed to get container status \"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb\": rpc error: code = NotFound desc = could not find container \"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb\": container with ID starting with 049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb not found: ID does not exist" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.632421 4800 scope.go:117] "RemoveContainer" containerID="1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.632816 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1"} err="failed to get container status \"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1\": rpc error: code = NotFound desc = could not find container \"1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1\": container with ID starting with 1a2f931dae43b8df2e44aef1e3ab53536567bd7dc9a5347b56d879c9984f6bb1 not found: ID does not exist" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.632866 4800 scope.go:117] "RemoveContainer" containerID="049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb" Nov 25 16:16:22 crc kubenswrapper[4800]: I1125 16:16:22.633205 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb"} err="failed to get container status \"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb\": rpc error: code = NotFound desc = could not find container \"049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb\": container with ID starting with 049e21b307c002def8de5c866c8dc20631a2a6f05c907a4ceb29de8d6a0360eb not found: ID does not exist" Nov 25 16:16:23 crc kubenswrapper[4800]: I1125 16:16:23.798958 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" path="/var/lib/kubelet/pods/51bc0d61-40c5-404a-978c-414717c8e3e9/volumes" Nov 25 16:16:28 crc kubenswrapper[4800]: I1125 16:16:28.677454 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:28 crc kubenswrapper[4800]: I1125 16:16:28.678311 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:28 crc kubenswrapper[4800]: I1125 16:16:28.732119 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:29 crc kubenswrapper[4800]: I1125 16:16:29.721490 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:29 crc kubenswrapper[4800]: I1125 16:16:29.783898 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7pxg"] Nov 25 16:16:31 crc kubenswrapper[4800]: I1125 16:16:31.688665 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b7pxg" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="registry-server" containerID="cri-o://87a0add99310f0182edb4cd0d6b7b60e13047889b299d05d8a4215fa31aa3f43" 
gracePeriod=2 Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.739062 4800 generic.go:334] "Generic (PLEG): container finished" podID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerID="87a0add99310f0182edb4cd0d6b7b60e13047889b299d05d8a4215fa31aa3f43" exitCode=0 Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.739378 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7pxg" event={"ID":"b36846d7-cb41-4c63-8bd9-7d728cae0459","Type":"ContainerDied","Data":"87a0add99310f0182edb4cd0d6b7b60e13047889b299d05d8a4215fa31aa3f43"} Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.739928 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7pxg" event={"ID":"b36846d7-cb41-4c63-8bd9-7d728cae0459","Type":"ContainerDied","Data":"d60e44a6088bd8d112e0670474109d12c1440b9fb9126ebddb22798096942f70"} Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.739949 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d60e44a6088bd8d112e0670474109d12c1440b9fb9126ebddb22798096942f70" Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.837464 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.975191 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-utilities\") pod \"b36846d7-cb41-4c63-8bd9-7d728cae0459\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.975543 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w88d6\" (UniqueName: \"kubernetes.io/projected/b36846d7-cb41-4c63-8bd9-7d728cae0459-kube-api-access-w88d6\") pod \"b36846d7-cb41-4c63-8bd9-7d728cae0459\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.975618 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-catalog-content\") pod \"b36846d7-cb41-4c63-8bd9-7d728cae0459\" (UID: \"b36846d7-cb41-4c63-8bd9-7d728cae0459\") " Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.977570 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-utilities" (OuterVolumeSpecName: "utilities") pod "b36846d7-cb41-4c63-8bd9-7d728cae0459" (UID: "b36846d7-cb41-4c63-8bd9-7d728cae0459"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:16:32 crc kubenswrapper[4800]: I1125 16:16:32.990871 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b36846d7-cb41-4c63-8bd9-7d728cae0459-kube-api-access-w88d6" (OuterVolumeSpecName: "kube-api-access-w88d6") pod "b36846d7-cb41-4c63-8bd9-7d728cae0459" (UID: "b36846d7-cb41-4c63-8bd9-7d728cae0459"). InnerVolumeSpecName "kube-api-access-w88d6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:16:33 crc kubenswrapper[4800]: I1125 16:16:33.006279 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b36846d7-cb41-4c63-8bd9-7d728cae0459" (UID: "b36846d7-cb41-4c63-8bd9-7d728cae0459"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:16:33 crc kubenswrapper[4800]: I1125 16:16:33.078435 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w88d6\" (UniqueName: \"kubernetes.io/projected/b36846d7-cb41-4c63-8bd9-7d728cae0459-kube-api-access-w88d6\") on node \"crc\" DevicePath \"\"" Nov 25 16:16:33 crc kubenswrapper[4800]: I1125 16:16:33.078914 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:16:33 crc kubenswrapper[4800]: I1125 16:16:33.078926 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b36846d7-cb41-4c63-8bd9-7d728cae0459-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:16:33 crc kubenswrapper[4800]: I1125 16:16:33.749946 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7pxg" Nov 25 16:16:33 crc kubenswrapper[4800]: I1125 16:16:33.801598 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7pxg"] Nov 25 16:16:33 crc kubenswrapper[4800]: I1125 16:16:33.832077 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7pxg"] Nov 25 16:16:35 crc kubenswrapper[4800]: I1125 16:16:35.807453 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" path="/var/lib/kubelet/pods/b36846d7-cb41-4c63-8bd9-7d728cae0459/volumes" Nov 25 16:16:42 crc kubenswrapper[4800]: I1125 16:16:42.640295 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:16:42 crc kubenswrapper[4800]: I1125 16:16:42.640923 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:17:12 crc kubenswrapper[4800]: I1125 16:17:12.640425 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:17:12 crc kubenswrapper[4800]: I1125 16:17:12.641350 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Nov 25 16:17:12 crc kubenswrapper[4800]: I1125 16:17:12.641439 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:17:12 crc kubenswrapper[4800]: I1125 16:17:12.643517 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1fbc3a3e2d5adfe5cf06ea9c7ae6a3f2cf274b421d1863625e183da94fc1b793"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:17:12 crc kubenswrapper[4800]: I1125 16:17:12.643638 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://1fbc3a3e2d5adfe5cf06ea9c7ae6a3f2cf274b421d1863625e183da94fc1b793" gracePeriod=600 Nov 25 16:17:13 crc kubenswrapper[4800]: I1125 16:17:13.191640 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="1fbc3a3e2d5adfe5cf06ea9c7ae6a3f2cf274b421d1863625e183da94fc1b793" exitCode=0 Nov 25 16:17:13 crc kubenswrapper[4800]: I1125 16:17:13.191735 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"1fbc3a3e2d5adfe5cf06ea9c7ae6a3f2cf274b421d1863625e183da94fc1b793"} Nov 25 16:17:13 crc kubenswrapper[4800]: I1125 16:17:13.192195 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107"} Nov 25 16:17:13 crc kubenswrapper[4800]: I1125 16:17:13.192238 4800 scope.go:117] "RemoveContainer" containerID="4793702f421526ca2edaf0cd75dbfecfecac2015c67bf28c0ed0bb1cb72b356e" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.758720 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s00-full"] Nov 25 16:18:13 crc kubenswrapper[4800]: E1125 16:18:13.760299 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="extract-utilities" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760327 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="extract-utilities" Nov 25 16:18:13 crc kubenswrapper[4800]: E1125 16:18:13.760341 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="extract-content" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760349 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="extract-content" Nov 25 16:18:13 crc kubenswrapper[4800]: E1125 16:18:13.760359 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="manager" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760369 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="manager" Nov 25 16:18:13 crc 
kubenswrapper[4800]: E1125 16:18:13.760401 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="kube-rbac-proxy" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760409 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="kube-rbac-proxy" Nov 25 16:18:13 crc kubenswrapper[4800]: E1125 16:18:13.760434 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="registry-server" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760442 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="registry-server" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760732 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="kube-rbac-proxy" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760795 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="51bc0d61-40c5-404a-978c-414717c8e3e9" containerName="manager" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.760810 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b36846d7-cb41-4c63-8bd9-7d728cae0459" containerName="registry-server" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.762007 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.766339 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.768506 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.768906 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.769437 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-wsw92" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.804283 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg77c\" (UniqueName: \"kubernetes.io/projected/9f498125-ffd2-4526-8234-3e89d84f5753-kube-api-access-mg77c\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.804456 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-config-data\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.804546 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.804604 
4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ceph\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.804811 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.804964 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ssh-key\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.805133 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config-secret\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.805196 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ca-certs\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.805328 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.805434 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.809424 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-full"] Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907436 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907537 4800 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ssh-key\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907604 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config-secret\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907636 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ca-certs\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907707 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907748 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907877 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg77c\" (UniqueName: \"kubernetes.io/projected/9f498125-ffd2-4526-8234-3e89d84f5753-kube-api-access-mg77c\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.907972 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-config-data\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.908031 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.908069 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ceph\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.909728 4800 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.909888 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.910895 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.911096 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.912258 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-config-data\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.917082 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ssh-key\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.918614 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ca-certs\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.919286 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config-secret\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.921977 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ceph\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.942486 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg77c\" 
(UniqueName: \"kubernetes.io/projected/9f498125-ffd2-4526-8234-3e89d84f5753-kube-api-access-mg77c\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:13 crc kubenswrapper[4800]: I1125 16:18:13.962703 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s00-full\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:14 crc kubenswrapper[4800]: I1125 16:18:14.097678 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-full" Nov 25 16:18:14 crc kubenswrapper[4800]: I1125 16:18:14.580265 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-full"] Nov 25 16:18:14 crc kubenswrapper[4800]: I1125 16:18:14.986854 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-full" event={"ID":"9f498125-ffd2-4526-8234-3e89d84f5753","Type":"ContainerStarted","Data":"3b0f62b8a4db084b9c05fe2f620aca0b36a87ee9ee1038f7e1a7d362da71b046"} Nov 25 16:18:46 crc kubenswrapper[4800]: E1125 16:18:46.682126 4800 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 25 16:18:46 crc kubenswrapper[4800]: E1125 16:18:46.682945 4800 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ceph,ReadOnly:true,MountPath:/etc/ceph,SubPath:,MountPropagation:nil,SubPathExpr:,
RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mg77c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest-s00-full_openstack(9f498125-ffd2-4526-8234-3e89d84f5753): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 16:18:46 crc kubenswrapper[4800]: E1125 16:18:46.684533 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest-s00-full" podUID="9f498125-ffd2-4526-8234-3e89d84f5753" Nov 25 16:18:47 crc kubenswrapper[4800]: E1125 16:18:47.404912 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest-s00-full" podUID="9f498125-ffd2-4526-8234-3e89d84f5753" Nov 25 16:19:00 crc kubenswrapper[4800]: I1125 16:19:00.289406 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 16:19:01 crc kubenswrapper[4800]: I1125 16:19:01.601699 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-full" event={"ID":"9f498125-ffd2-4526-8234-3e89d84f5753","Type":"ContainerStarted","Data":"b03c0df3eb8612a617a189531059cbc6fb2f99fe2c6b82ed01b7250b31813a1c"} Nov 25 16:19:01 crc kubenswrapper[4800]: I1125 16:19:01.635952 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s00-full" podStartSLOduration=3.934572214 podStartE2EDuration="49.635929771s" podCreationTimestamp="2025-11-25 16:18:12 +0000 UTC" firstStartedPulling="2025-11-25 16:18:14.584415028 +0000 UTC m=+3655.638823520" lastFinishedPulling="2025-11-25 16:19:00.285772605 +0000 UTC m=+3701.340181077" observedRunningTime="2025-11-25 16:19:01.629249758 +0000 UTC m=+3702.683658270" watchObservedRunningTime="2025-11-25 16:19:01.635929771 +0000 UTC m=+3702.690338253" Nov 25 16:19:35 crc kubenswrapper[4800]: I1125 16:19:35.763321 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p88sh"] Nov 25 16:19:35 crc 
kubenswrapper[4800]: I1125 16:19:35.768397 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:35 crc kubenswrapper[4800]: I1125 16:19:35.783117 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p88sh"] Nov 25 16:19:35 crc kubenswrapper[4800]: I1125 16:19:35.924239 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-utilities\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:35 crc kubenswrapper[4800]: I1125 16:19:35.924578 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-catalog-content\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:35 crc kubenswrapper[4800]: I1125 16:19:35.924631 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhxjg\" (UniqueName: \"kubernetes.io/projected/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-kube-api-access-dhxjg\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 16:19:36.027730 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-catalog-content\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 16:19:36.026926 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-catalog-content\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 16:19:36.027908 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhxjg\" (UniqueName: \"kubernetes.io/projected/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-kube-api-access-dhxjg\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 16:19:36.028002 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-utilities\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 16:19:36.028589 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-utilities\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 
16:19:36.053816 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhxjg\" (UniqueName: \"kubernetes.io/projected/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-kube-api-access-dhxjg\") pod \"redhat-operators-p88sh\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 16:19:36.117688 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:36 crc kubenswrapper[4800]: I1125 16:19:36.616946 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p88sh"] Nov 25 16:19:37 crc kubenswrapper[4800]: I1125 16:19:37.069483 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerStarted","Data":"c959d951e65d16091b00d692c85a238ae50e293d71fe18e7e0500f064cfe9fec"} Nov 25 16:19:42 crc kubenswrapper[4800]: I1125 16:19:42.141627 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerStarted","Data":"185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904"} Nov 25 16:19:42 crc kubenswrapper[4800]: I1125 16:19:42.640286 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:19:42 crc kubenswrapper[4800]: I1125 16:19:42.640375 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:19:43 crc kubenswrapper[4800]: I1125 16:19:43.158658 4800 generic.go:334] "Generic (PLEG): container finished" podID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerID="185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904" exitCode=0 Nov 25 16:19:43 crc kubenswrapper[4800]: I1125 16:19:43.158752 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerDied","Data":"185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904"} Nov 25 16:19:45 crc kubenswrapper[4800]: I1125 16:19:45.182223 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerStarted","Data":"cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3"} Nov 25 16:19:46 crc kubenswrapper[4800]: I1125 16:19:46.197695 4800 generic.go:334] "Generic (PLEG): container finished" podID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerID="cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3" exitCode=0 Nov 25 16:19:46 crc kubenswrapper[4800]: I1125 16:19:46.197799 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" 
event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerDied","Data":"cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3"} Nov 25 16:19:52 crc kubenswrapper[4800]: I1125 16:19:52.276245 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerStarted","Data":"c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119"} Nov 25 16:19:52 crc kubenswrapper[4800]: I1125 16:19:52.305413 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p88sh" podStartSLOduration=8.833058671 podStartE2EDuration="17.30538674s" podCreationTimestamp="2025-11-25 16:19:35 +0000 UTC" firstStartedPulling="2025-11-25 16:19:43.162651896 +0000 UTC m=+3744.217060378" lastFinishedPulling="2025-11-25 16:19:51.634979945 +0000 UTC m=+3752.689388447" observedRunningTime="2025-11-25 16:19:52.297823473 +0000 UTC m=+3753.352231965" watchObservedRunningTime="2025-11-25 16:19:52.30538674 +0000 UTC m=+3753.359795222" Nov 25 16:19:56 crc kubenswrapper[4800]: I1125 16:19:56.118531 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:56 crc kubenswrapper[4800]: I1125 16:19:56.119372 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:19:57 crc kubenswrapper[4800]: I1125 16:19:57.195950 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p88sh" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="registry-server" probeResult="failure" output=< Nov 25 16:19:57 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:19:57 crc kubenswrapper[4800]: > Nov 25 16:20:06 crc kubenswrapper[4800]: I1125 16:20:06.176135 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:20:06 crc kubenswrapper[4800]: I1125 16:20:06.235943 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:20:07 crc kubenswrapper[4800]: I1125 16:20:07.000379 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p88sh"] Nov 25 16:20:07 crc kubenswrapper[4800]: I1125 16:20:07.464727 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p88sh" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="registry-server" containerID="cri-o://c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119" gracePeriod=2 Nov 25 16:20:07 crc kubenswrapper[4800]: I1125 16:20:07.991289 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.115340 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-utilities\") pod \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.115531 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhxjg\" (UniqueName: \"kubernetes.io/projected/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-kube-api-access-dhxjg\") pod \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.115664 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-catalog-content\") pod \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\" (UID: \"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3\") " Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.116192 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-utilities" (OuterVolumeSpecName: "utilities") pod "dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" (UID: "dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.117008 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.127039 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-kube-api-access-dhxjg" (OuterVolumeSpecName: "kube-api-access-dhxjg") pod "dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" (UID: "dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3"). InnerVolumeSpecName "kube-api-access-dhxjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.207768 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" (UID: "dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.219653 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhxjg\" (UniqueName: \"kubernetes.io/projected/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-kube-api-access-dhxjg\") on node \"crc\" DevicePath \"\"" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.219709 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.475814 4800 generic.go:334] "Generic (PLEG): container finished" podID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerID="c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119" exitCode=0 Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.475920 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p88sh" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.475897 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerDied","Data":"c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119"} Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.476068 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p88sh" event={"ID":"dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3","Type":"ContainerDied","Data":"c959d951e65d16091b00d692c85a238ae50e293d71fe18e7e0500f064cfe9fec"} Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.476102 4800 scope.go:117] "RemoveContainer" containerID="c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.511162 4800 scope.go:117] "RemoveContainer" containerID="cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.521532 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p88sh"] Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.532167 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p88sh"] Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.539002 4800 scope.go:117] "RemoveContainer" containerID="185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.599915 4800 scope.go:117] "RemoveContainer" containerID="c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119" Nov 25 16:20:08 crc kubenswrapper[4800]: E1125 16:20:08.600779 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119\": container with ID starting with c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119 not found: ID does not exist" containerID="c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.600868 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119"} err="failed to get container status \"c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119\": 
rpc error: code = NotFound desc = could not find container \"c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119\": container with ID starting with c084772b23f2fd1cc8f8842a7cbe29e4915b0ca972e2500a1ce77e570654d119 not found: ID does not exist" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.600913 4800 scope.go:117] "RemoveContainer" containerID="cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3" Nov 25 16:20:08 crc kubenswrapper[4800]: E1125 16:20:08.601438 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3\": container with ID starting with cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3 not found: ID does not exist" containerID="cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.601491 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3"} err="failed to get container status \"cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3\": rpc error: code = NotFound desc = could not find container \"cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3\": container with ID starting with cd1e84436c786f408b49cf043f5589a53d4b993b9f3d745d1f98ce76214957f3 not found: ID does not exist" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.601528 4800 scope.go:117] "RemoveContainer" containerID="185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904" Nov 25 16:20:08 crc kubenswrapper[4800]: E1125 16:20:08.602285 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904\": container with ID starting with 185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904 not found: ID does not exist" containerID="185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904" Nov 25 16:20:08 crc kubenswrapper[4800]: I1125 16:20:08.602357 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904"} err="failed to get container status \"185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904\": rpc error: code = NotFound desc = could not find container \"185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904\": container with ID starting with 185cc2126d03a94356236fb5f11cc5a5292e2c21e33902cf1fd9f60f0f194904 not found: ID does not exist" Nov 25 16:20:09 crc kubenswrapper[4800]: I1125 16:20:09.804430 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" path="/var/lib/kubelet/pods/dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3/volumes" Nov 25 16:20:12 crc kubenswrapper[4800]: I1125 16:20:12.640228 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:20:12 crc kubenswrapper[4800]: I1125 16:20:12.641232 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:20:29 crc kubenswrapper[4800]: I1125 16:20:29.171820 4800 scope.go:117] "RemoveContainer" containerID="475a6c78dddffc4cbad18c8ea408a71f331d7782bdfe25bda95a16e9e1a9b311" Nov 25 16:20:29 crc kubenswrapper[4800]: I1125 16:20:29.229727 4800 scope.go:117] "RemoveContainer" containerID="5cc41b8662120e04b08d74a47070aa117d4aca3d09300fe9e4ca639e19d5c785" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.526949 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wjxvt"] Nov 25 16:20:31 crc kubenswrapper[4800]: E1125 16:20:31.528188 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="extract-content" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.528207 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="extract-content" Nov 25 16:20:31 crc kubenswrapper[4800]: E1125 16:20:31.528238 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="registry-server" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.528246 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="registry-server" Nov 25 16:20:31 crc kubenswrapper[4800]: E1125 16:20:31.528260 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="extract-utilities" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.528271 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="extract-utilities" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.528532 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfd4bdb4-4ae6-4606-b9e1-ee02d05207f3" containerName="registry-server" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.530564 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.534790 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wjxvt"] Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.574796 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-utilities\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.574945 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh9sx\" (UniqueName: \"kubernetes.io/projected/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-kube-api-access-qh9sx\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.575151 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-catalog-content\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.676639 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-utilities\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.676736 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh9sx\" (UniqueName: \"kubernetes.io/projected/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-kube-api-access-qh9sx\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.676887 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-catalog-content\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.677321 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-utilities\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.677587 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-catalog-content\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.702038 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qh9sx\" (UniqueName: \"kubernetes.io/projected/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-kube-api-access-qh9sx\") pod \"community-operators-wjxvt\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:31 crc kubenswrapper[4800]: I1125 16:20:31.908976 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:32 crc kubenswrapper[4800]: I1125 16:20:32.544374 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wjxvt"] Nov 25 16:20:32 crc kubenswrapper[4800]: I1125 16:20:32.887772 4800 generic.go:334] "Generic (PLEG): container finished" podID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerID="c2d8a514fe80b7f859d44f9b4efc5de09b75b4be9f8cd50b40bfff30a0a7bc0f" exitCode=0 Nov 25 16:20:32 crc kubenswrapper[4800]: I1125 16:20:32.887882 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjxvt" event={"ID":"60f0f3a5-7816-4b0f-b1d0-b0697a45b900","Type":"ContainerDied","Data":"c2d8a514fe80b7f859d44f9b4efc5de09b75b4be9f8cd50b40bfff30a0a7bc0f"} Nov 25 16:20:32 crc kubenswrapper[4800]: I1125 16:20:32.888271 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjxvt" event={"ID":"60f0f3a5-7816-4b0f-b1d0-b0697a45b900","Type":"ContainerStarted","Data":"10639a259aa7f61e793855d31565aa8088cacbbec3dbcdec7eea0d211676fe6c"} Nov 25 16:20:32 crc kubenswrapper[4800]: I1125 16:20:32.891549 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:20:34 crc kubenswrapper[4800]: I1125 16:20:34.918597 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjxvt" event={"ID":"60f0f3a5-7816-4b0f-b1d0-b0697a45b900","Type":"ContainerStarted","Data":"8d2febdac375be2070f77dc66c7528141c2152c75600ae7adc918a04c2a1d853"} Nov 25 16:20:35 crc kubenswrapper[4800]: I1125 16:20:35.936163 4800 generic.go:334] "Generic (PLEG): container finished" podID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerID="8d2febdac375be2070f77dc66c7528141c2152c75600ae7adc918a04c2a1d853" exitCode=0 Nov 25 16:20:35 crc kubenswrapper[4800]: I1125 16:20:35.936259 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjxvt" event={"ID":"60f0f3a5-7816-4b0f-b1d0-b0697a45b900","Type":"ContainerDied","Data":"8d2febdac375be2070f77dc66c7528141c2152c75600ae7adc918a04c2a1d853"} Nov 25 16:20:37 crc kubenswrapper[4800]: I1125 16:20:37.969541 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjxvt" event={"ID":"60f0f3a5-7816-4b0f-b1d0-b0697a45b900","Type":"ContainerStarted","Data":"5c11093c4c67b2516c5eaf0e12a2be7572d7dd07bd77309142f81007f0eded57"} Nov 25 16:20:37 crc kubenswrapper[4800]: I1125 16:20:37.997613 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wjxvt" podStartSLOduration=3.169308414 podStartE2EDuration="6.997583768s" podCreationTimestamp="2025-11-25 16:20:31 +0000 UTC" firstStartedPulling="2025-11-25 16:20:32.891246948 +0000 UTC m=+3793.945655420" lastFinishedPulling="2025-11-25 16:20:36.719522292 +0000 UTC m=+3797.773930774" observedRunningTime="2025-11-25 16:20:37.992252532 +0000 UTC m=+3799.046661034" watchObservedRunningTime="2025-11-25 
16:20:37.997583768 +0000 UTC m=+3799.051992250" Nov 25 16:20:41 crc kubenswrapper[4800]: I1125 16:20:41.910513 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:41 crc kubenswrapper[4800]: I1125 16:20:41.911693 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:41.999598 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:42.103518 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:42.261789 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wjxvt"] Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:42.640029 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:42.640130 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:42.640209 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:42.641629 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:20:42 crc kubenswrapper[4800]: I1125 16:20:42.641778 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" gracePeriod=600 Nov 25 16:20:43 crc kubenswrapper[4800]: E1125 16:20:43.110880 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:20:44 crc kubenswrapper[4800]: I1125 16:20:44.067741 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" exitCode=0 Nov 25 16:20:44 crc kubenswrapper[4800]: 
I1125 16:20:44.067894 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107"} Nov 25 16:20:44 crc kubenswrapper[4800]: I1125 16:20:44.067980 4800 scope.go:117] "RemoveContainer" containerID="1fbc3a3e2d5adfe5cf06ea9c7ae6a3f2cf274b421d1863625e183da94fc1b793" Nov 25 16:20:44 crc kubenswrapper[4800]: I1125 16:20:44.068127 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wjxvt" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="registry-server" containerID="cri-o://5c11093c4c67b2516c5eaf0e12a2be7572d7dd07bd77309142f81007f0eded57" gracePeriod=2 Nov 25 16:20:44 crc kubenswrapper[4800]: I1125 16:20:44.069230 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:20:44 crc kubenswrapper[4800]: E1125 16:20:44.069808 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.082825 4800 generic.go:334] "Generic (PLEG): container finished" podID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerID="5c11093c4c67b2516c5eaf0e12a2be7572d7dd07bd77309142f81007f0eded57" exitCode=0 Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.083604 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjxvt" event={"ID":"60f0f3a5-7816-4b0f-b1d0-b0697a45b900","Type":"ContainerDied","Data":"5c11093c4c67b2516c5eaf0e12a2be7572d7dd07bd77309142f81007f0eded57"} Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.083672 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjxvt" event={"ID":"60f0f3a5-7816-4b0f-b1d0-b0697a45b900","Type":"ContainerDied","Data":"10639a259aa7f61e793855d31565aa8088cacbbec3dbcdec7eea0d211676fe6c"} Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.083687 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10639a259aa7f61e793855d31565aa8088cacbbec3dbcdec7eea0d211676fe6c" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.151955 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.218583 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qh9sx\" (UniqueName: \"kubernetes.io/projected/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-kube-api-access-qh9sx\") pod \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.218900 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-catalog-content\") pod \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.218975 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-utilities\") pod \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\" (UID: \"60f0f3a5-7816-4b0f-b1d0-b0697a45b900\") " Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.221781 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-utilities" (OuterVolumeSpecName: "utilities") pod "60f0f3a5-7816-4b0f-b1d0-b0697a45b900" (UID: "60f0f3a5-7816-4b0f-b1d0-b0697a45b900"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.230535 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-kube-api-access-qh9sx" (OuterVolumeSpecName: "kube-api-access-qh9sx") pod "60f0f3a5-7816-4b0f-b1d0-b0697a45b900" (UID: "60f0f3a5-7816-4b0f-b1d0-b0697a45b900"). InnerVolumeSpecName "kube-api-access-qh9sx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.282712 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "60f0f3a5-7816-4b0f-b1d0-b0697a45b900" (UID: "60f0f3a5-7816-4b0f-b1d0-b0697a45b900"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.321811 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.321887 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:20:45 crc kubenswrapper[4800]: I1125 16:20:45.321903 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qh9sx\" (UniqueName: \"kubernetes.io/projected/60f0f3a5-7816-4b0f-b1d0-b0697a45b900-kube-api-access-qh9sx\") on node \"crc\" DevicePath \"\"" Nov 25 16:20:46 crc kubenswrapper[4800]: I1125 16:20:46.096522 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wjxvt" Nov 25 16:20:46 crc kubenswrapper[4800]: I1125 16:20:46.125762 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wjxvt"] Nov 25 16:20:46 crc kubenswrapper[4800]: I1125 16:20:46.135216 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wjxvt"] Nov 25 16:20:47 crc kubenswrapper[4800]: I1125 16:20:47.797559 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" path="/var/lib/kubelet/pods/60f0f3a5-7816-4b0f-b1d0-b0697a45b900/volumes" Nov 25 16:20:58 crc kubenswrapper[4800]: I1125 16:20:58.789077 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:20:58 crc kubenswrapper[4800]: E1125 16:20:58.790180 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:21:07 crc kubenswrapper[4800]: I1125 16:21:07.839959 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="b3409070-5204-4027-b692-201d89bbb758" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.156:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 16:21:08 crc kubenswrapper[4800]: I1125 16:21:08.251114 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-volume1-0" podUID="9418bcee-6bf4-4758-9ffc-ce6945012a4e" containerName="cinder-volume" probeResult="failure" output="Get \"http://10.217.0.237:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 16:21:08 crc kubenswrapper[4800]: I1125 16:21:08.251115 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="7be6b551-566a-410c-b8f9-892dee455826" containerName="cinder-backup" probeResult="failure" output="Get \"http://10.217.0.238:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 16:21:09 crc kubenswrapper[4800]: I1125 16:21:09.810695 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:21:09 crc kubenswrapper[4800]: E1125 16:21:09.811390 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:21:20 crc kubenswrapper[4800]: I1125 16:21:20.786109 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:21:20 crc kubenswrapper[4800]: E1125 16:21:20.787286 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:21:35 crc kubenswrapper[4800]: I1125 16:21:35.786275 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:21:35 crc kubenswrapper[4800]: E1125 16:21:35.787238 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:21:48 crc kubenswrapper[4800]: I1125 16:21:48.786672 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:21:48 crc kubenswrapper[4800]: E1125 16:21:48.787645 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:22:02 crc kubenswrapper[4800]: I1125 16:22:02.787589 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:22:02 crc kubenswrapper[4800]: E1125 16:22:02.789011 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:22:15 crc kubenswrapper[4800]: I1125 16:22:15.786618 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:22:15 crc kubenswrapper[4800]: E1125 16:22:15.787767 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:22:29 crc kubenswrapper[4800]: I1125 16:22:29.393570 4800 scope.go:117] "RemoveContainer" containerID="a9dafb9c65fb6a72288eecd6567bf57a1812a3c5bc34bc618b71523ebdc74ad3" Nov 25 16:22:29 crc kubenswrapper[4800]: I1125 16:22:29.422281 4800 scope.go:117] "RemoveContainer" containerID="a5a80765790afb2e60b9ce50864c02ca969a70a472bf863449680b2a6cd48a51" Nov 25 16:22:29 crc kubenswrapper[4800]: I1125 16:22:29.489135 4800 scope.go:117] "RemoveContainer" containerID="87a0add99310f0182edb4cd0d6b7b60e13047889b299d05d8a4215fa31aa3f43" Nov 25 16:22:30 crc kubenswrapper[4800]: I1125 16:22:30.786782 4800 
scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:22:30 crc kubenswrapper[4800]: E1125 16:22:30.787198 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:22:42 crc kubenswrapper[4800]: I1125 16:22:42.785863 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:22:42 crc kubenswrapper[4800]: E1125 16:22:42.787026 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:22:55 crc kubenswrapper[4800]: I1125 16:22:55.786394 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:22:55 crc kubenswrapper[4800]: E1125 16:22:55.788072 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:23:02 crc kubenswrapper[4800]: I1125 16:23:02.046828 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-z5lr8"] Nov 25 16:23:02 crc kubenswrapper[4800]: I1125 16:23:02.061366 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-b395-account-create-4xzz6"] Nov 25 16:23:02 crc kubenswrapper[4800]: I1125 16:23:02.073136 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-z5lr8"] Nov 25 16:23:02 crc kubenswrapper[4800]: I1125 16:23:02.082610 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-b395-account-create-4xzz6"] Nov 25 16:23:03 crc kubenswrapper[4800]: I1125 16:23:03.798526 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f552120-84af-44ca-a39f-cd3e5f895ed5" path="/var/lib/kubelet/pods/7f552120-84af-44ca-a39f-cd3e5f895ed5/volumes" Nov 25 16:23:03 crc kubenswrapper[4800]: I1125 16:23:03.799981 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b995efb0-426b-470e-a79c-9be50ad73e64" path="/var/lib/kubelet/pods/b995efb0-426b-470e-a79c-9be50ad73e64/volumes" Nov 25 16:23:08 crc kubenswrapper[4800]: I1125 16:23:08.785978 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:23:08 crc kubenswrapper[4800]: E1125 16:23:08.786937 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:23:23 crc kubenswrapper[4800]: I1125 16:23:23.787639 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:23:23 crc kubenswrapper[4800]: E1125 16:23:23.788986 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:23:29 crc kubenswrapper[4800]: I1125 16:23:29.586786 4800 scope.go:117] "RemoveContainer" containerID="75a007bc3f45e2d68a66625de6b8f388c02d9d24d7b0aaa7f3c9eaae61d17620" Nov 25 16:23:30 crc kubenswrapper[4800]: I1125 16:23:30.096404 4800 scope.go:117] "RemoveContainer" containerID="cd1b5681aabde16c1b4e9431e0f1aa4cd99452c70abd7f2895e63710edba1854" Nov 25 16:23:35 crc kubenswrapper[4800]: I1125 16:23:35.047572 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-64scj"] Nov 25 16:23:35 crc kubenswrapper[4800]: I1125 16:23:35.055913 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-64scj"] Nov 25 16:23:35 crc kubenswrapper[4800]: I1125 16:23:35.797361 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="817a76e7-8681-4597-b14d-d404ad3a2801" path="/var/lib/kubelet/pods/817a76e7-8681-4597-b14d-d404ad3a2801/volumes" Nov 25 16:23:37 crc kubenswrapper[4800]: I1125 16:23:37.787024 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:23:37 crc kubenswrapper[4800]: E1125 16:23:37.788147 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:23:52 crc kubenswrapper[4800]: I1125 16:23:52.788430 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:23:52 crc kubenswrapper[4800]: E1125 16:23:52.789535 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:24:07 crc kubenswrapper[4800]: I1125 16:24:07.786056 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:24:07 crc kubenswrapper[4800]: E1125 16:24:07.787373 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:24:22 crc kubenswrapper[4800]: I1125 16:24:22.787251 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:24:22 crc kubenswrapper[4800]: E1125 16:24:22.788089 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:24:30 crc kubenswrapper[4800]: I1125 16:24:30.208218 4800 scope.go:117] "RemoveContainer" containerID="6100b07b51b3f845eb7751ba052dc017336d470eeb874b6fb0c0b6b031e01163" Nov 25 16:24:33 crc kubenswrapper[4800]: I1125 16:24:33.790976 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:24:33 crc kubenswrapper[4800]: E1125 16:24:33.791594 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:24:45 crc kubenswrapper[4800]: I1125 16:24:45.786741 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:24:45 crc kubenswrapper[4800]: E1125 16:24:45.788018 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:24:56 crc kubenswrapper[4800]: I1125 16:24:56.786984 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:24:56 crc kubenswrapper[4800]: E1125 16:24:56.788304 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:25:08 crc kubenswrapper[4800]: I1125 16:25:08.785174 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:25:08 crc kubenswrapper[4800]: E1125 16:25:08.785938 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:25:21 crc kubenswrapper[4800]: I1125 16:25:21.793270 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:25:21 crc kubenswrapper[4800]: E1125 16:25:21.794569 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:25:32 crc kubenswrapper[4800]: I1125 16:25:32.786126 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:25:32 crc kubenswrapper[4800]: E1125 16:25:32.787420 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:25:43 crc kubenswrapper[4800]: I1125 16:25:43.786259 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:25:44 crc kubenswrapper[4800]: I1125 16:25:44.560698 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"0ff732864e7639bf7b5b2bacf496c8f1f2226123826a3746e9fb054405046b72"} Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.449728 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bd829"] Nov 25 16:25:59 crc kubenswrapper[4800]: E1125 16:25:59.451518 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="registry-server" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.451550 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="registry-server" Nov 25 16:25:59 crc kubenswrapper[4800]: E1125 16:25:59.451608 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="extract-utilities" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.451626 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="extract-utilities" Nov 25 16:25:59 crc kubenswrapper[4800]: E1125 16:25:59.451673 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="extract-content" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.451690 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="extract-content" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.452241 4800 
memory_manager.go:354] "RemoveStaleState removing state" podUID="60f0f3a5-7816-4b0f-b1d0-b0697a45b900" containerName="registry-server" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.455278 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.495572 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bd829"] Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.572574 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-catalog-content\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.572644 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-utilities\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.572795 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v8z9\" (UniqueName: \"kubernetes.io/projected/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-kube-api-access-7v8z9\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.675358 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v8z9\" (UniqueName: \"kubernetes.io/projected/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-kube-api-access-7v8z9\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.675522 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-catalog-content\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.675565 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-utilities\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.676178 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-utilities\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.676348 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-catalog-content\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.700414 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7v8z9\" (UniqueName: \"kubernetes.io/projected/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-kube-api-access-7v8z9\") pod \"certified-operators-bd829\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:25:59 crc kubenswrapper[4800]: I1125 16:25:59.783089 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:26:00 crc kubenswrapper[4800]: I1125 16:26:00.583864 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bd829"] Nov 25 16:26:00 crc kubenswrapper[4800]: I1125 16:26:00.735384 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bd829" event={"ID":"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5","Type":"ContainerStarted","Data":"c52b7da65855acc998e3c8488a203e782cdd2dc1e4ad5db6fdab7b49895591a3"} Nov 25 16:26:01 crc kubenswrapper[4800]: I1125 16:26:01.748005 4800 generic.go:334] "Generic (PLEG): container finished" podID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerID="0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e" exitCode=0 Nov 25 16:26:01 crc kubenswrapper[4800]: I1125 16:26:01.748118 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bd829" event={"ID":"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5","Type":"ContainerDied","Data":"0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e"} Nov 25 16:26:01 crc kubenswrapper[4800]: I1125 16:26:01.751757 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:26:02 crc kubenswrapper[4800]: I1125 16:26:02.758994 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bd829" event={"ID":"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5","Type":"ContainerStarted","Data":"034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019"} Nov 25 16:26:03 crc kubenswrapper[4800]: I1125 16:26:03.771088 4800 generic.go:334] "Generic (PLEG): container finished" podID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerID="034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019" exitCode=0 Nov 25 16:26:03 crc kubenswrapper[4800]: I1125 16:26:03.771206 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bd829" event={"ID":"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5","Type":"ContainerDied","Data":"034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019"} Nov 25 16:26:04 crc kubenswrapper[4800]: I1125 16:26:04.790556 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bd829" event={"ID":"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5","Type":"ContainerStarted","Data":"6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13"} Nov 25 16:26:04 crc kubenswrapper[4800]: I1125 16:26:04.826080 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bd829" podStartSLOduration=3.3857415140000002 podStartE2EDuration="5.826045285s" 
podCreationTimestamp="2025-11-25 16:25:59 +0000 UTC" firstStartedPulling="2025-11-25 16:26:01.751453142 +0000 UTC m=+4122.805861624" lastFinishedPulling="2025-11-25 16:26:04.191756913 +0000 UTC m=+4125.246165395" observedRunningTime="2025-11-25 16:26:04.823371112 +0000 UTC m=+4125.877779604" watchObservedRunningTime="2025-11-25 16:26:04.826045285 +0000 UTC m=+4125.880453777" Nov 25 16:26:09 crc kubenswrapper[4800]: I1125 16:26:09.784050 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:26:09 crc kubenswrapper[4800]: I1125 16:26:09.792627 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:26:09 crc kubenswrapper[4800]: I1125 16:26:09.843664 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:26:10 crc kubenswrapper[4800]: I1125 16:26:10.900812 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:26:10 crc kubenswrapper[4800]: I1125 16:26:10.955626 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bd829"] Nov 25 16:26:12 crc kubenswrapper[4800]: I1125 16:26:12.876318 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bd829" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="registry-server" containerID="cri-o://6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13" gracePeriod=2 Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.515557 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.650422 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-catalog-content\") pod \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.650588 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-utilities\") pod \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.650871 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7v8z9\" (UniqueName: \"kubernetes.io/projected/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-kube-api-access-7v8z9\") pod \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\" (UID: \"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5\") " Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.651475 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-utilities" (OuterVolumeSpecName: "utilities") pod "b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" (UID: "b93b19c7-e35e-4346-8e3b-746f1c3a6fa5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.651640 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.665978 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-kube-api-access-7v8z9" (OuterVolumeSpecName: "kube-api-access-7v8z9") pod "b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" (UID: "b93b19c7-e35e-4346-8e3b-746f1c3a6fa5"). InnerVolumeSpecName "kube-api-access-7v8z9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.704247 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" (UID: "b93b19c7-e35e-4346-8e3b-746f1c3a6fa5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.753312 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.753349 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7v8z9\" (UniqueName: \"kubernetes.io/projected/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5-kube-api-access-7v8z9\") on node \"crc\" DevicePath \"\"" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.888236 4800 generic.go:334] "Generic (PLEG): container finished" podID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerID="6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13" exitCode=0 Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.888281 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bd829" event={"ID":"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5","Type":"ContainerDied","Data":"6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13"} Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.888321 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bd829" event={"ID":"b93b19c7-e35e-4346-8e3b-746f1c3a6fa5","Type":"ContainerDied","Data":"c52b7da65855acc998e3c8488a203e782cdd2dc1e4ad5db6fdab7b49895591a3"} Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.888345 4800 scope.go:117] "RemoveContainer" containerID="6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.889256 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bd829" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.916252 4800 scope.go:117] "RemoveContainer" containerID="034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.931366 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bd829"] Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.943738 4800 scope.go:117] "RemoveContainer" containerID="0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.944312 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bd829"] Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.993097 4800 scope.go:117] "RemoveContainer" containerID="6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13" Nov 25 16:26:13 crc kubenswrapper[4800]: E1125 16:26:13.993535 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13\": container with ID starting with 6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13 not found: ID does not exist" containerID="6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.993622 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13"} err="failed to get container status \"6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13\": rpc error: code = NotFound desc = could not find container \"6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13\": container with ID starting with 6cad2de19961ec0318132802731b67769617d936fb5ff9ca4d4a47028e2d3f13 not found: ID does not exist" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.993708 4800 scope.go:117] "RemoveContainer" containerID="034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019" Nov 25 16:26:13 crc kubenswrapper[4800]: E1125 16:26:13.994307 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019\": container with ID starting with 034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019 not found: ID does not exist" containerID="034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.994372 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019"} err="failed to get container status \"034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019\": rpc error: code = NotFound desc = could not find container \"034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019\": container with ID starting with 034b7f1b38d02580b8df4b2fb1f93c43eca12d8d65b05a67fc20ced0afff8019 not found: ID does not exist" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.994402 4800 scope.go:117] "RemoveContainer" containerID="0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e" Nov 25 16:26:13 crc kubenswrapper[4800]: E1125 16:26:13.994915 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e\": container with ID starting with 0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e not found: ID does not exist" containerID="0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e" Nov 25 16:26:13 crc kubenswrapper[4800]: I1125 16:26:13.994944 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e"} err="failed to get container status \"0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e\": rpc error: code = NotFound desc = could not find container \"0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e\": container with ID starting with 0e716a32a1833af2de356f6bd7e18ba6c88c729bc5b0d97da3e3d902ee1e0a8e not found: ID does not exist" Nov 25 16:26:15 crc kubenswrapper[4800]: I1125 16:26:15.797000 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" path="/var/lib/kubelet/pods/b93b19c7-e35e-4346-8e3b-746f1c3a6fa5/volumes" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.219824 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bbfgt"] Nov 25 16:27:17 crc kubenswrapper[4800]: E1125 16:27:17.221400 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="extract-utilities" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.221423 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="extract-utilities" Nov 25 16:27:17 crc kubenswrapper[4800]: E1125 16:27:17.221449 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="extract-content" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.221461 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="extract-content" Nov 25 16:27:17 crc kubenswrapper[4800]: E1125 16:27:17.221506 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="registry-server" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.221516 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="registry-server" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.221763 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b93b19c7-e35e-4346-8e3b-746f1c3a6fa5" containerName="registry-server" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.223876 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.233916 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbfgt"] Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.290425 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-utilities\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.290473 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f8wr\" (UniqueName: \"kubernetes.io/projected/77b24665-78cc-4058-8abd-ad1eda2ba89a-kube-api-access-2f8wr\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.290596 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-catalog-content\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.394663 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-utilities\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.394741 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f8wr\" (UniqueName: \"kubernetes.io/projected/77b24665-78cc-4058-8abd-ad1eda2ba89a-kube-api-access-2f8wr\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.394874 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-catalog-content\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.395997 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-catalog-content\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.396123 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-utilities\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.778384 4800 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2f8wr\" (UniqueName: \"kubernetes.io/projected/77b24665-78cc-4058-8abd-ad1eda2ba89a-kube-api-access-2f8wr\") pod \"redhat-marketplace-bbfgt\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:17 crc kubenswrapper[4800]: I1125 16:27:17.892055 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:18 crc kubenswrapper[4800]: I1125 16:27:18.434227 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbfgt"] Nov 25 16:27:18 crc kubenswrapper[4800]: I1125 16:27:18.552171 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbfgt" event={"ID":"77b24665-78cc-4058-8abd-ad1eda2ba89a","Type":"ContainerStarted","Data":"e1cf84cb02e785b9a06ec362133c1c09b67028afe2ce84fa5ceb8a41d1f41332"} Nov 25 16:27:19 crc kubenswrapper[4800]: I1125 16:27:19.572464 4800 generic.go:334] "Generic (PLEG): container finished" podID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerID="bc9cba9987adcb36b3f1202310b916045d70042fa86744b0f7ebcf015cfcf551" exitCode=0 Nov 25 16:27:19 crc kubenswrapper[4800]: I1125 16:27:19.572616 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbfgt" event={"ID":"77b24665-78cc-4058-8abd-ad1eda2ba89a","Type":"ContainerDied","Data":"bc9cba9987adcb36b3f1202310b916045d70042fa86744b0f7ebcf015cfcf551"} Nov 25 16:27:20 crc kubenswrapper[4800]: I1125 16:27:20.615633 4800 generic.go:334] "Generic (PLEG): container finished" podID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerID="9a71f4e51f26d09b18d0d4a93c8e8bf16ecb8019e70eeb23c90f76dad902c28b" exitCode=0 Nov 25 16:27:20 crc kubenswrapper[4800]: I1125 16:27:20.616251 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbfgt" event={"ID":"77b24665-78cc-4058-8abd-ad1eda2ba89a","Type":"ContainerDied","Data":"9a71f4e51f26d09b18d0d4a93c8e8bf16ecb8019e70eeb23c90f76dad902c28b"} Nov 25 16:27:21 crc kubenswrapper[4800]: I1125 16:27:21.630665 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbfgt" event={"ID":"77b24665-78cc-4058-8abd-ad1eda2ba89a","Type":"ContainerStarted","Data":"8815ad9bc1e91244fce6c95788c180f369cb0d463bfd814ec4be8c11d603fd8b"} Nov 25 16:27:21 crc kubenswrapper[4800]: I1125 16:27:21.655226 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bbfgt" podStartSLOduration=3.123910725 podStartE2EDuration="4.655202453s" podCreationTimestamp="2025-11-25 16:27:17 +0000 UTC" firstStartedPulling="2025-11-25 16:27:19.575200695 +0000 UTC m=+4200.629609177" lastFinishedPulling="2025-11-25 16:27:21.106492423 +0000 UTC m=+4202.160900905" observedRunningTime="2025-11-25 16:27:21.649000044 +0000 UTC m=+4202.703408526" watchObservedRunningTime="2025-11-25 16:27:21.655202453 +0000 UTC m=+4202.709610935" Nov 25 16:27:27 crc kubenswrapper[4800]: I1125 16:27:27.892351 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:27 crc kubenswrapper[4800]: I1125 16:27:27.892788 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:27 crc kubenswrapper[4800]: I1125 16:27:27.952817 4800 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:29 crc kubenswrapper[4800]: I1125 16:27:29.021795 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:29 crc kubenswrapper[4800]: I1125 16:27:29.098172 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbfgt"] Nov 25 16:27:30 crc kubenswrapper[4800]: I1125 16:27:30.657162 4800 scope.go:117] "RemoveContainer" containerID="5c11093c4c67b2516c5eaf0e12a2be7572d7dd07bd77309142f81007f0eded57" Nov 25 16:27:30 crc kubenswrapper[4800]: I1125 16:27:30.703355 4800 scope.go:117] "RemoveContainer" containerID="c2d8a514fe80b7f859d44f9b4efc5de09b75b4be9f8cd50b40bfff30a0a7bc0f" Nov 25 16:27:30 crc kubenswrapper[4800]: I1125 16:27:30.728340 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bbfgt" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="registry-server" containerID="cri-o://8815ad9bc1e91244fce6c95788c180f369cb0d463bfd814ec4be8c11d603fd8b" gracePeriod=2 Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.192385 4800 scope.go:117] "RemoveContainer" containerID="8d2febdac375be2070f77dc66c7528141c2152c75600ae7adc918a04c2a1d853" Nov 25 16:27:31 crc kubenswrapper[4800]: E1125 16:27:31.484436 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77b24665_78cc_4058_8abd_ad1eda2ba89a.slice/crio-8815ad9bc1e91244fce6c95788c180f369cb0d463bfd814ec4be8c11d603fd8b.scope\": RecentStats: unable to find data in memory cache]" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.775954 4800 generic.go:334] "Generic (PLEG): container finished" podID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerID="8815ad9bc1e91244fce6c95788c180f369cb0d463bfd814ec4be8c11d603fd8b" exitCode=0 Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.776049 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbfgt" event={"ID":"77b24665-78cc-4058-8abd-ad1eda2ba89a","Type":"ContainerDied","Data":"8815ad9bc1e91244fce6c95788c180f369cb0d463bfd814ec4be8c11d603fd8b"} Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.776454 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbfgt" event={"ID":"77b24665-78cc-4058-8abd-ad1eda2ba89a","Type":"ContainerDied","Data":"e1cf84cb02e785b9a06ec362133c1c09b67028afe2ce84fa5ceb8a41d1f41332"} Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.776475 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1cf84cb02e785b9a06ec362133c1c09b67028afe2ce84fa5ceb8a41d1f41332" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.825027 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.875115 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-utilities\") pod \"77b24665-78cc-4058-8abd-ad1eda2ba89a\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.875339 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-catalog-content\") pod \"77b24665-78cc-4058-8abd-ad1eda2ba89a\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.875733 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f8wr\" (UniqueName: \"kubernetes.io/projected/77b24665-78cc-4058-8abd-ad1eda2ba89a-kube-api-access-2f8wr\") pod \"77b24665-78cc-4058-8abd-ad1eda2ba89a\" (UID: \"77b24665-78cc-4058-8abd-ad1eda2ba89a\") " Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.877512 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-utilities" (OuterVolumeSpecName: "utilities") pod "77b24665-78cc-4058-8abd-ad1eda2ba89a" (UID: "77b24665-78cc-4058-8abd-ad1eda2ba89a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.878925 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.887619 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77b24665-78cc-4058-8abd-ad1eda2ba89a-kube-api-access-2f8wr" (OuterVolumeSpecName: "kube-api-access-2f8wr") pod "77b24665-78cc-4058-8abd-ad1eda2ba89a" (UID: "77b24665-78cc-4058-8abd-ad1eda2ba89a"). InnerVolumeSpecName "kube-api-access-2f8wr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.907471 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77b24665-78cc-4058-8abd-ad1eda2ba89a" (UID: "77b24665-78cc-4058-8abd-ad1eda2ba89a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.980508 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f8wr\" (UniqueName: \"kubernetes.io/projected/77b24665-78cc-4058-8abd-ad1eda2ba89a-kube-api-access-2f8wr\") on node \"crc\" DevicePath \"\"" Nov 25 16:27:31 crc kubenswrapper[4800]: I1125 16:27:31.980563 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b24665-78cc-4058-8abd-ad1eda2ba89a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:27:32 crc kubenswrapper[4800]: I1125 16:27:32.801388 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbfgt" Nov 25 16:27:32 crc kubenswrapper[4800]: I1125 16:27:32.862717 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbfgt"] Nov 25 16:27:32 crc kubenswrapper[4800]: I1125 16:27:32.874684 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbfgt"] Nov 25 16:27:33 crc kubenswrapper[4800]: I1125 16:27:33.798212 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" path="/var/lib/kubelet/pods/77b24665-78cc-4058-8abd-ad1eda2ba89a/volumes" Nov 25 16:28:12 crc kubenswrapper[4800]: I1125 16:28:12.639881 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:28:12 crc kubenswrapper[4800]: I1125 16:28:12.640922 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:28:42 crc kubenswrapper[4800]: I1125 16:28:42.639982 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:28:42 crc kubenswrapper[4800]: I1125 16:28:42.640741 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.640215 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.641177 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.641234 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.642270 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0ff732864e7639bf7b5b2bacf496c8f1f2226123826a3746e9fb054405046b72"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" 
Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.642343 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://0ff732864e7639bf7b5b2bacf496c8f1f2226123826a3746e9fb054405046b72" gracePeriod=600 Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.847095 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="0ff732864e7639bf7b5b2bacf496c8f1f2226123826a3746e9fb054405046b72" exitCode=0 Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.847306 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"0ff732864e7639bf7b5b2bacf496c8f1f2226123826a3746e9fb054405046b72"} Nov 25 16:29:12 crc kubenswrapper[4800]: I1125 16:29:12.848097 4800 scope.go:117] "RemoveContainer" containerID="ab5b41d1cc3cb8ce89edbd63a6308914b9778bd7180ee9b099b677a561202107" Nov 25 16:29:13 crc kubenswrapper[4800]: I1125 16:29:13.861974 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49"} Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.257457 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg"] Nov 25 16:30:00 crc kubenswrapper[4800]: E1125 16:30:00.265886 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="registry-server" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.266030 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="registry-server" Nov 25 16:30:00 crc kubenswrapper[4800]: E1125 16:30:00.266171 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="extract-content" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.266264 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="extract-content" Nov 25 16:30:00 crc kubenswrapper[4800]: E1125 16:30:00.266370 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="extract-utilities" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.266449 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="extract-utilities" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.267143 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="77b24665-78cc-4058-8abd-ad1eda2ba89a" containerName="registry-server" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.268574 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.272132 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.272137 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.280226 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg"] Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.328299 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-config-volume\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.328498 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrb6k\" (UniqueName: \"kubernetes.io/projected/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-kube-api-access-lrb6k\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.328547 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-secret-volume\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.431750 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrb6k\" (UniqueName: \"kubernetes.io/projected/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-kube-api-access-lrb6k\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.432256 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-secret-volume\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.432305 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-config-volume\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.433439 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-config-volume\") pod 
\"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.456988 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-secret-volume\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.457085 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrb6k\" (UniqueName: \"kubernetes.io/projected/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-kube-api-access-lrb6k\") pod \"collect-profiles-29401470-lnhmg\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:00 crc kubenswrapper[4800]: I1125 16:30:00.594166 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:01 crc kubenswrapper[4800]: I1125 16:30:01.112674 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg"] Nov 25 16:30:01 crc kubenswrapper[4800]: I1125 16:30:01.431004 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" event={"ID":"b0e025a4-4be8-4e65-b0d4-f45a77bbad73","Type":"ContainerStarted","Data":"710eda65a0f3236b92b5fa57ecc1200a17c540392a0c527f596049207d65cab0"} Nov 25 16:30:01 crc kubenswrapper[4800]: I1125 16:30:01.431125 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" event={"ID":"b0e025a4-4be8-4e65-b0d4-f45a77bbad73","Type":"ContainerStarted","Data":"2335b6f7e9e0003eaf91f39d8a3ba3cacfc8e681a6640e5a0a134c24c9f242a6"} Nov 25 16:30:01 crc kubenswrapper[4800]: I1125 16:30:01.469207 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" podStartSLOduration=1.469179106 podStartE2EDuration="1.469179106s" podCreationTimestamp="2025-11-25 16:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:30:01.462478493 +0000 UTC m=+4362.516886975" watchObservedRunningTime="2025-11-25 16:30:01.469179106 +0000 UTC m=+4362.523587588" Nov 25 16:30:02 crc kubenswrapper[4800]: I1125 16:30:02.442998 4800 generic.go:334] "Generic (PLEG): container finished" podID="b0e025a4-4be8-4e65-b0d4-f45a77bbad73" containerID="710eda65a0f3236b92b5fa57ecc1200a17c540392a0c527f596049207d65cab0" exitCode=0 Nov 25 16:30:02 crc kubenswrapper[4800]: I1125 16:30:02.443104 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" event={"ID":"b0e025a4-4be8-4e65-b0d4-f45a77bbad73","Type":"ContainerDied","Data":"710eda65a0f3236b92b5fa57ecc1200a17c540392a0c527f596049207d65cab0"} Nov 25 16:30:03 crc kubenswrapper[4800]: I1125 16:30:03.972792 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.020943 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-secret-volume\") pod \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.021809 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-config-volume\") pod \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.021991 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrb6k\" (UniqueName: \"kubernetes.io/projected/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-kube-api-access-lrb6k\") pod \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\" (UID: \"b0e025a4-4be8-4e65-b0d4-f45a77bbad73\") " Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.025929 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-config-volume" (OuterVolumeSpecName: "config-volume") pod "b0e025a4-4be8-4e65-b0d4-f45a77bbad73" (UID: "b0e025a4-4be8-4e65-b0d4-f45a77bbad73"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.031221 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-kube-api-access-lrb6k" (OuterVolumeSpecName: "kube-api-access-lrb6k") pod "b0e025a4-4be8-4e65-b0d4-f45a77bbad73" (UID: "b0e025a4-4be8-4e65-b0d4-f45a77bbad73"). InnerVolumeSpecName "kube-api-access-lrb6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.040146 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b0e025a4-4be8-4e65-b0d4-f45a77bbad73" (UID: "b0e025a4-4be8-4e65-b0d4-f45a77bbad73"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.125515 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.125568 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrb6k\" (UniqueName: \"kubernetes.io/projected/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-kube-api-access-lrb6k\") on node \"crc\" DevicePath \"\"" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.125584 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e025a4-4be8-4e65-b0d4-f45a77bbad73-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.466405 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" event={"ID":"b0e025a4-4be8-4e65-b0d4-f45a77bbad73","Type":"ContainerDied","Data":"2335b6f7e9e0003eaf91f39d8a3ba3cacfc8e681a6640e5a0a134c24c9f242a6"} Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.466458 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2335b6f7e9e0003eaf91f39d8a3ba3cacfc8e681a6640e5a0a134c24c9f242a6" Nov 25 16:30:04 crc kubenswrapper[4800]: I1125 16:30:04.466470 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg" Nov 25 16:30:05 crc kubenswrapper[4800]: I1125 16:30:05.071643 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"] Nov 25 16:30:05 crc kubenswrapper[4800]: I1125 16:30:05.080269 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401425-s5hzh"] Nov 25 16:30:05 crc kubenswrapper[4800]: I1125 16:30:05.798428 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fe623fd-f427-45ce-a7fd-bb9d5f0062f0" path="/var/lib/kubelet/pods/2fe623fd-f427-45ce-a7fd-bb9d5f0062f0/volumes" Nov 25 16:30:31 crc kubenswrapper[4800]: I1125 16:30:31.488016 4800 scope.go:117] "RemoveContainer" containerID="ea4f67d91d203956267c7b693ba8201eda9a2a3bd866c0c1d0b079997ea8342a" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.550810 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t7mcn"] Nov 25 16:30:55 crc kubenswrapper[4800]: E1125 16:30:55.552115 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0e025a4-4be8-4e65-b0d4-f45a77bbad73" containerName="collect-profiles" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.552133 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0e025a4-4be8-4e65-b0d4-f45a77bbad73" containerName="collect-profiles" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.552342 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0e025a4-4be8-4e65-b0d4-f45a77bbad73" containerName="collect-profiles" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.553954 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.562423 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t7mcn"] Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.586802 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-catalog-content\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.587099 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67fgx\" (UniqueName: \"kubernetes.io/projected/82c191e3-5662-42c7-bb45-a2a24d4f08a9-kube-api-access-67fgx\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.587160 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-utilities\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.689958 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67fgx\" (UniqueName: \"kubernetes.io/projected/82c191e3-5662-42c7-bb45-a2a24d4f08a9-kube-api-access-67fgx\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.690042 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-utilities\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.690196 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-catalog-content\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.690700 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-utilities\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.690710 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-catalog-content\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.712087 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-67fgx\" (UniqueName: \"kubernetes.io/projected/82c191e3-5662-42c7-bb45-a2a24d4f08a9-kube-api-access-67fgx\") pod \"community-operators-t7mcn\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:55 crc kubenswrapper[4800]: I1125 16:30:55.885369 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:30:56 crc kubenswrapper[4800]: I1125 16:30:56.509254 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t7mcn"] Nov 25 16:30:57 crc kubenswrapper[4800]: I1125 16:30:57.107006 4800 generic.go:334] "Generic (PLEG): container finished" podID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerID="b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d" exitCode=0 Nov 25 16:30:57 crc kubenswrapper[4800]: I1125 16:30:57.107107 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t7mcn" event={"ID":"82c191e3-5662-42c7-bb45-a2a24d4f08a9","Type":"ContainerDied","Data":"b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d"} Nov 25 16:30:57 crc kubenswrapper[4800]: I1125 16:30:57.107538 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t7mcn" event={"ID":"82c191e3-5662-42c7-bb45-a2a24d4f08a9","Type":"ContainerStarted","Data":"0f3c229238721005d99c74ca1c3cd230013af4830c6e5777f61f6a63c9ad8568"} Nov 25 16:31:00 crc kubenswrapper[4800]: I1125 16:31:00.137650 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t7mcn" event={"ID":"82c191e3-5662-42c7-bb45-a2a24d4f08a9","Type":"ContainerStarted","Data":"d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8"} Nov 25 16:31:02 crc kubenswrapper[4800]: I1125 16:31:02.990482 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jkn6x"] Nov 25 16:31:02 crc kubenswrapper[4800]: I1125 16:31:02.994549 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.006007 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jkn6x"] Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.173447 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwktj\" (UniqueName: \"kubernetes.io/projected/1bdd692c-ef89-4ee2-bf92-933532908dd8-kube-api-access-pwktj\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.173549 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-utilities\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.173621 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-catalog-content\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.275594 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwktj\" (UniqueName: \"kubernetes.io/projected/1bdd692c-ef89-4ee2-bf92-933532908dd8-kube-api-access-pwktj\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.275723 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-utilities\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.275807 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-catalog-content\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.276563 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-utilities\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.276769 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-catalog-content\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.303258 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pwktj\" (UniqueName: \"kubernetes.io/projected/1bdd692c-ef89-4ee2-bf92-933532908dd8-kube-api-access-pwktj\") pod \"redhat-operators-jkn6x\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.349551 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:03 crc kubenswrapper[4800]: I1125 16:31:03.895237 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jkn6x"] Nov 25 16:31:03 crc kubenswrapper[4800]: W1125 16:31:03.897230 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bdd692c_ef89_4ee2_bf92_933532908dd8.slice/crio-7b2bb49f07d4947781c06ba9e0cc7b905bd7ac9a717527b6faeff85bfbed7fa1 WatchSource:0}: Error finding container 7b2bb49f07d4947781c06ba9e0cc7b905bd7ac9a717527b6faeff85bfbed7fa1: Status 404 returned error can't find the container with id 7b2bb49f07d4947781c06ba9e0cc7b905bd7ac9a717527b6faeff85bfbed7fa1 Nov 25 16:31:04 crc kubenswrapper[4800]: I1125 16:31:04.178538 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jkn6x" event={"ID":"1bdd692c-ef89-4ee2-bf92-933532908dd8","Type":"ContainerStarted","Data":"7b2bb49f07d4947781c06ba9e0cc7b905bd7ac9a717527b6faeff85bfbed7fa1"} Nov 25 16:31:05 crc kubenswrapper[4800]: I1125 16:31:05.192685 4800 generic.go:334] "Generic (PLEG): container finished" podID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerID="8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296" exitCode=0 Nov 25 16:31:05 crc kubenswrapper[4800]: I1125 16:31:05.192787 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jkn6x" event={"ID":"1bdd692c-ef89-4ee2-bf92-933532908dd8","Type":"ContainerDied","Data":"8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296"} Nov 25 16:31:05 crc kubenswrapper[4800]: I1125 16:31:05.196145 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:31:12 crc kubenswrapper[4800]: I1125 16:31:12.270364 4800 generic.go:334] "Generic (PLEG): container finished" podID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerID="d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8" exitCode=0 Nov 25 16:31:12 crc kubenswrapper[4800]: I1125 16:31:12.270511 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t7mcn" event={"ID":"82c191e3-5662-42c7-bb45-a2a24d4f08a9","Type":"ContainerDied","Data":"d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8"} Nov 25 16:31:12 crc kubenswrapper[4800]: I1125 16:31:12.275202 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jkn6x" event={"ID":"1bdd692c-ef89-4ee2-bf92-933532908dd8","Type":"ContainerStarted","Data":"a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43"} Nov 25 16:31:14 crc kubenswrapper[4800]: I1125 16:31:14.298443 4800 generic.go:334] "Generic (PLEG): container finished" podID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerID="a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43" exitCode=0 Nov 25 16:31:14 crc kubenswrapper[4800]: I1125 16:31:14.298523 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jkn6x" 
event={"ID":"1bdd692c-ef89-4ee2-bf92-933532908dd8","Type":"ContainerDied","Data":"a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43"} Nov 25 16:31:17 crc kubenswrapper[4800]: I1125 16:31:17.330589 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t7mcn" event={"ID":"82c191e3-5662-42c7-bb45-a2a24d4f08a9","Type":"ContainerStarted","Data":"077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b"} Nov 25 16:31:18 crc kubenswrapper[4800]: I1125 16:31:18.359791 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t7mcn" podStartSLOduration=5.078775356 podStartE2EDuration="23.359770517s" podCreationTimestamp="2025-11-25 16:30:55 +0000 UTC" firstStartedPulling="2025-11-25 16:30:57.109569523 +0000 UTC m=+4418.163978005" lastFinishedPulling="2025-11-25 16:31:15.390564684 +0000 UTC m=+4436.444973166" observedRunningTime="2025-11-25 16:31:18.356646462 +0000 UTC m=+4439.411054944" watchObservedRunningTime="2025-11-25 16:31:18.359770517 +0000 UTC m=+4439.414178999" Nov 25 16:31:20 crc kubenswrapper[4800]: I1125 16:31:20.376562 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jkn6x" event={"ID":"1bdd692c-ef89-4ee2-bf92-933532908dd8","Type":"ContainerStarted","Data":"910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe"} Nov 25 16:31:20 crc kubenswrapper[4800]: I1125 16:31:20.415395 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jkn6x" podStartSLOduration=3.849176145 podStartE2EDuration="18.415361203s" podCreationTimestamp="2025-11-25 16:31:02 +0000 UTC" firstStartedPulling="2025-11-25 16:31:05.195864054 +0000 UTC m=+4426.250272536" lastFinishedPulling="2025-11-25 16:31:19.762049112 +0000 UTC m=+4440.816457594" observedRunningTime="2025-11-25 16:31:20.403692415 +0000 UTC m=+4441.458100907" watchObservedRunningTime="2025-11-25 16:31:20.415361203 +0000 UTC m=+4441.469769685" Nov 25 16:31:23 crc kubenswrapper[4800]: I1125 16:31:23.350340 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:23 crc kubenswrapper[4800]: I1125 16:31:23.351351 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:24 crc kubenswrapper[4800]: I1125 16:31:24.406436 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jkn6x" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="registry-server" probeResult="failure" output=< Nov 25 16:31:24 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:31:24 crc kubenswrapper[4800]: > Nov 25 16:31:25 crc kubenswrapper[4800]: I1125 16:31:25.885705 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:31:25 crc kubenswrapper[4800]: I1125 16:31:25.886263 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:31:26 crc kubenswrapper[4800]: I1125 16:31:26.936568 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-t7mcn" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="registry-server" probeResult="failure" output=< Nov 25 16:31:26 crc kubenswrapper[4800]: 
timeout: failed to connect service ":50051" within 1s Nov 25 16:31:26 crc kubenswrapper[4800]: > Nov 25 16:31:33 crc kubenswrapper[4800]: I1125 16:31:33.411459 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:33 crc kubenswrapper[4800]: I1125 16:31:33.482650 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:34 crc kubenswrapper[4800]: I1125 16:31:34.171484 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jkn6x"] Nov 25 16:31:34 crc kubenswrapper[4800]: I1125 16:31:34.539992 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jkn6x" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="registry-server" containerID="cri-o://910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe" gracePeriod=2 Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.222418 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.390044 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-catalog-content\") pod \"1bdd692c-ef89-4ee2-bf92-933532908dd8\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.390128 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwktj\" (UniqueName: \"kubernetes.io/projected/1bdd692c-ef89-4ee2-bf92-933532908dd8-kube-api-access-pwktj\") pod \"1bdd692c-ef89-4ee2-bf92-933532908dd8\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.390400 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-utilities\") pod \"1bdd692c-ef89-4ee2-bf92-933532908dd8\" (UID: \"1bdd692c-ef89-4ee2-bf92-933532908dd8\") " Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.391403 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-utilities" (OuterVolumeSpecName: "utilities") pod "1bdd692c-ef89-4ee2-bf92-933532908dd8" (UID: "1bdd692c-ef89-4ee2-bf92-933532908dd8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.402011 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bdd692c-ef89-4ee2-bf92-933532908dd8-kube-api-access-pwktj" (OuterVolumeSpecName: "kube-api-access-pwktj") pod "1bdd692c-ef89-4ee2-bf92-933532908dd8" (UID: "1bdd692c-ef89-4ee2-bf92-933532908dd8"). InnerVolumeSpecName "kube-api-access-pwktj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.492924 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwktj\" (UniqueName: \"kubernetes.io/projected/1bdd692c-ef89-4ee2-bf92-933532908dd8-kube-api-access-pwktj\") on node \"crc\" DevicePath \"\"" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.493266 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.498765 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1bdd692c-ef89-4ee2-bf92-933532908dd8" (UID: "1bdd692c-ef89-4ee2-bf92-933532908dd8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.552259 4800 generic.go:334] "Generic (PLEG): container finished" podID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerID="910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe" exitCode=0 Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.552318 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jkn6x" event={"ID":"1bdd692c-ef89-4ee2-bf92-933532908dd8","Type":"ContainerDied","Data":"910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe"} Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.552364 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jkn6x" event={"ID":"1bdd692c-ef89-4ee2-bf92-933532908dd8","Type":"ContainerDied","Data":"7b2bb49f07d4947781c06ba9e0cc7b905bd7ac9a717527b6faeff85bfbed7fa1"} Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.552389 4800 scope.go:117] "RemoveContainer" containerID="910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.553917 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jkn6x" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.580769 4800 scope.go:117] "RemoveContainer" containerID="a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.605491 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bdd692c-ef89-4ee2-bf92-933532908dd8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.620562 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jkn6x"] Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.631884 4800 scope.go:117] "RemoveContainer" containerID="8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.642297 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jkn6x"] Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.679615 4800 scope.go:117] "RemoveContainer" containerID="910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe" Nov 25 16:31:35 crc kubenswrapper[4800]: E1125 16:31:35.680898 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe\": container with ID starting with 910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe not found: ID does not exist" containerID="910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.680958 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe"} err="failed to get container status \"910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe\": rpc error: code = NotFound desc = could not find container \"910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe\": container with ID starting with 910b7caf36c159ef73efca127bfaa5231a166700700b7eff1b78d50f2a81eebe not found: ID does not exist" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.680996 4800 scope.go:117] "RemoveContainer" containerID="a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43" Nov 25 16:31:35 crc kubenswrapper[4800]: E1125 16:31:35.683431 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43\": container with ID starting with a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43 not found: ID does not exist" containerID="a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.683464 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43"} err="failed to get container status \"a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43\": rpc error: code = NotFound desc = could not find container \"a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43\": container with ID starting with a2ab18df11699525221af431dce857083df9e4e44a934cef9bdfe00e17226e43 not found: ID does not exist" Nov 25 16:31:35 crc 
kubenswrapper[4800]: I1125 16:31:35.683489 4800 scope.go:117] "RemoveContainer" containerID="8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296" Nov 25 16:31:35 crc kubenswrapper[4800]: E1125 16:31:35.683994 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296\": container with ID starting with 8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296 not found: ID does not exist" containerID="8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.684046 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296"} err="failed to get container status \"8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296\": rpc error: code = NotFound desc = could not find container \"8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296\": container with ID starting with 8c519cbcb901a82d3481977880f8b6efaf2e5b70b502ade7114f63b907c32296 not found: ID does not exist" Nov 25 16:31:35 crc kubenswrapper[4800]: I1125 16:31:35.799408 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" path="/var/lib/kubelet/pods/1bdd692c-ef89-4ee2-bf92-933532908dd8/volumes" Nov 25 16:31:36 crc kubenswrapper[4800]: I1125 16:31:36.947891 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-t7mcn" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="registry-server" probeResult="failure" output=< Nov 25 16:31:36 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:31:36 crc kubenswrapper[4800]: > Nov 25 16:31:42 crc kubenswrapper[4800]: I1125 16:31:42.640117 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:31:42 crc kubenswrapper[4800]: I1125 16:31:42.641041 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:31:45 crc kubenswrapper[4800]: I1125 16:31:45.939046 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:31:46 crc kubenswrapper[4800]: I1125 16:31:46.001153 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:31:46 crc kubenswrapper[4800]: I1125 16:31:46.184703 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t7mcn"] Nov 25 16:31:47 crc kubenswrapper[4800]: I1125 16:31:47.672315 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t7mcn" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="registry-server" containerID="cri-o://077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b" 
gracePeriod=2 Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.571822 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.685807 4800 generic.go:334] "Generic (PLEG): container finished" podID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerID="077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b" exitCode=0 Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.685945 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t7mcn" event={"ID":"82c191e3-5662-42c7-bb45-a2a24d4f08a9","Type":"ContainerDied","Data":"077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b"} Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.686027 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t7mcn" event={"ID":"82c191e3-5662-42c7-bb45-a2a24d4f08a9","Type":"ContainerDied","Data":"0f3c229238721005d99c74ca1c3cd230013af4830c6e5777f61f6a63c9ad8568"} Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.685983 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t7mcn" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.686057 4800 scope.go:117] "RemoveContainer" containerID="077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.711972 4800 scope.go:117] "RemoveContainer" containerID="d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.737824 4800 scope.go:117] "RemoveContainer" containerID="b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.762218 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67fgx\" (UniqueName: \"kubernetes.io/projected/82c191e3-5662-42c7-bb45-a2a24d4f08a9-kube-api-access-67fgx\") pod \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.762550 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-utilities\") pod \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.762667 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-catalog-content\") pod \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\" (UID: \"82c191e3-5662-42c7-bb45-a2a24d4f08a9\") " Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.763506 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-utilities" (OuterVolumeSpecName: "utilities") pod "82c191e3-5662-42c7-bb45-a2a24d4f08a9" (UID: "82c191e3-5662-42c7-bb45-a2a24d4f08a9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.771361 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82c191e3-5662-42c7-bb45-a2a24d4f08a9-kube-api-access-67fgx" (OuterVolumeSpecName: "kube-api-access-67fgx") pod "82c191e3-5662-42c7-bb45-a2a24d4f08a9" (UID: "82c191e3-5662-42c7-bb45-a2a24d4f08a9"). InnerVolumeSpecName "kube-api-access-67fgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.817838 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "82c191e3-5662-42c7-bb45-a2a24d4f08a9" (UID: "82c191e3-5662-42c7-bb45-a2a24d4f08a9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.856111 4800 scope.go:117] "RemoveContainer" containerID="077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b" Nov 25 16:31:48 crc kubenswrapper[4800]: E1125 16:31:48.857009 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b\": container with ID starting with 077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b not found: ID does not exist" containerID="077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.857173 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b"} err="failed to get container status \"077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b\": rpc error: code = NotFound desc = could not find container \"077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b\": container with ID starting with 077e038212049718733a9c649441a6517d61c74446db0e298d38d1ca9aa5956b not found: ID does not exist" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.857254 4800 scope.go:117] "RemoveContainer" containerID="d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8" Nov 25 16:31:48 crc kubenswrapper[4800]: E1125 16:31:48.857820 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8\": container with ID starting with d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8 not found: ID does not exist" containerID="d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.857868 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8"} err="failed to get container status \"d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8\": rpc error: code = NotFound desc = could not find container \"d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8\": container with ID starting with d252420fe9fde0e6fa8765c0c77434cf6c8c73f592319068f8d2b2acca9154e8 not found: ID does not exist" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.857908 4800 scope.go:117] "RemoveContainer" 
containerID="b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d" Nov 25 16:31:48 crc kubenswrapper[4800]: E1125 16:31:48.858247 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d\": container with ID starting with b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d not found: ID does not exist" containerID="b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.858278 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d"} err="failed to get container status \"b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d\": rpc error: code = NotFound desc = could not find container \"b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d\": container with ID starting with b3c29812ad32934b4b8415831b43e371c97f51eb33d421224f3f56af85cb231d not found: ID does not exist" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.865057 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.865106 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82c191e3-5662-42c7-bb45-a2a24d4f08a9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:31:48 crc kubenswrapper[4800]: I1125 16:31:48.865123 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67fgx\" (UniqueName: \"kubernetes.io/projected/82c191e3-5662-42c7-bb45-a2a24d4f08a9-kube-api-access-67fgx\") on node \"crc\" DevicePath \"\"" Nov 25 16:31:49 crc kubenswrapper[4800]: I1125 16:31:49.031519 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t7mcn"] Nov 25 16:31:49 crc kubenswrapper[4800]: I1125 16:31:49.048155 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t7mcn"] Nov 25 16:31:49 crc kubenswrapper[4800]: I1125 16:31:49.803189 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" path="/var/lib/kubelet/pods/82c191e3-5662-42c7-bb45-a2a24d4f08a9/volumes" Nov 25 16:32:12 crc kubenswrapper[4800]: I1125 16:32:12.639929 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:32:12 crc kubenswrapper[4800]: I1125 16:32:12.640529 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:32:42 crc kubenswrapper[4800]: I1125 16:32:42.640203 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:32:42 crc kubenswrapper[4800]: I1125 16:32:42.641128 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:32:42 crc kubenswrapper[4800]: I1125 16:32:42.641183 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:32:42 crc kubenswrapper[4800]: I1125 16:32:42.642168 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:32:42 crc kubenswrapper[4800]: I1125 16:32:42.642230 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" gracePeriod=600 Nov 25 16:32:43 crc kubenswrapper[4800]: I1125 16:32:43.244907 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" exitCode=0 Nov 25 16:32:43 crc kubenswrapper[4800]: I1125 16:32:43.244949 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49"} Nov 25 16:32:43 crc kubenswrapper[4800]: I1125 16:32:43.245023 4800 scope.go:117] "RemoveContainer" containerID="0ff732864e7639bf7b5b2bacf496c8f1f2226123826a3746e9fb054405046b72" Nov 25 16:32:43 crc kubenswrapper[4800]: E1125 16:32:43.289801 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:32:44 crc kubenswrapper[4800]: I1125 16:32:44.258158 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:32:44 crc kubenswrapper[4800]: E1125 16:32:44.258865 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:32:55 crc kubenswrapper[4800]: I1125 16:32:55.785316 4800 scope.go:117] 
"RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:32:55 crc kubenswrapper[4800]: E1125 16:32:55.786475 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:33:10 crc kubenswrapper[4800]: I1125 16:33:10.786538 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:33:10 crc kubenswrapper[4800]: E1125 16:33:10.787508 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:33:23 crc kubenswrapper[4800]: I1125 16:33:23.785807 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:33:23 crc kubenswrapper[4800]: E1125 16:33:23.786987 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:33:31 crc kubenswrapper[4800]: I1125 16:33:31.698374 4800 scope.go:117] "RemoveContainer" containerID="8815ad9bc1e91244fce6c95788c180f369cb0d463bfd814ec4be8c11d603fd8b" Nov 25 16:33:31 crc kubenswrapper[4800]: I1125 16:33:31.734975 4800 scope.go:117] "RemoveContainer" containerID="9a71f4e51f26d09b18d0d4a93c8e8bf16ecb8019e70eeb23c90f76dad902c28b" Nov 25 16:33:31 crc kubenswrapper[4800]: I1125 16:33:31.757873 4800 scope.go:117] "RemoveContainer" containerID="bc9cba9987adcb36b3f1202310b916045d70042fa86744b0f7ebcf015cfcf551" Nov 25 16:33:36 crc kubenswrapper[4800]: I1125 16:33:36.786164 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:33:36 crc kubenswrapper[4800]: E1125 16:33:36.787454 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:33:49 crc kubenswrapper[4800]: I1125 16:33:49.793584 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:33:49 crc kubenswrapper[4800]: E1125 16:33:49.794783 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:34:02 crc kubenswrapper[4800]: I1125 16:34:02.785926 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:34:02 crc kubenswrapper[4800]: E1125 16:34:02.787025 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:34:16 crc kubenswrapper[4800]: I1125 16:34:16.786569 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:34:16 crc kubenswrapper[4800]: E1125 16:34:16.787561 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:34:31 crc kubenswrapper[4800]: I1125 16:34:31.785705 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:34:31 crc kubenswrapper[4800]: E1125 16:34:31.786499 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:34:42 crc kubenswrapper[4800]: I1125 16:34:42.786145 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:34:42 crc kubenswrapper[4800]: E1125 16:34:42.787246 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:34:57 crc kubenswrapper[4800]: I1125 16:34:57.786539 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:34:57 crc kubenswrapper[4800]: E1125 16:34:57.787830 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:35:10 crc kubenswrapper[4800]: I1125 16:35:10.786767 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:35:10 crc kubenswrapper[4800]: E1125 16:35:10.787682 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:35:21 crc kubenswrapper[4800]: I1125 16:35:21.785093 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:35:21 crc kubenswrapper[4800]: E1125 16:35:21.786111 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:35:33 crc kubenswrapper[4800]: I1125 16:35:33.787378 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:35:33 crc kubenswrapper[4800]: E1125 16:35:33.788536 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:35:46 crc kubenswrapper[4800]: I1125 16:35:46.785381 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:35:46 crc kubenswrapper[4800]: E1125 16:35:46.786421 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:35:57 crc kubenswrapper[4800]: I1125 16:35:57.785826 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:35:57 crc kubenswrapper[4800]: E1125 16:35:57.786756 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:36:12 crc kubenswrapper[4800]: I1125 16:36:12.785628 4800 
scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:36:12 crc kubenswrapper[4800]: E1125 16:36:12.786770 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:36:24 crc kubenswrapper[4800]: I1125 16:36:24.786679 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:36:24 crc kubenswrapper[4800]: E1125 16:36:24.787970 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:36:36 crc kubenswrapper[4800]: I1125 16:36:36.785410 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:36:36 crc kubenswrapper[4800]: E1125 16:36:36.786308 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:36:51 crc kubenswrapper[4800]: I1125 16:36:51.786192 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:36:51 crc kubenswrapper[4800]: E1125 16:36:51.787227 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:37:02 crc kubenswrapper[4800]: I1125 16:37:02.786056 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:37:02 crc kubenswrapper[4800]: E1125 16:37:02.787357 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.847079 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4qhtb"] Nov 25 16:37:07 crc kubenswrapper[4800]: E1125 16:37:07.848476 4800 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="extract-utilities" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.848500 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="extract-utilities" Nov 25 16:37:07 crc kubenswrapper[4800]: E1125 16:37:07.848535 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="extract-content" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.848547 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="extract-content" Nov 25 16:37:07 crc kubenswrapper[4800]: E1125 16:37:07.848571 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="registry-server" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.848583 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="registry-server" Nov 25 16:37:07 crc kubenswrapper[4800]: E1125 16:37:07.848623 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="registry-server" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.848634 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="registry-server" Nov 25 16:37:07 crc kubenswrapper[4800]: E1125 16:37:07.848668 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="extract-content" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.848677 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="extract-content" Nov 25 16:37:07 crc kubenswrapper[4800]: E1125 16:37:07.848703 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="extract-utilities" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.848713 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="extract-utilities" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.849036 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bdd692c-ef89-4ee2-bf92-933532908dd8" containerName="registry-server" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.849069 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="82c191e3-5662-42c7-bb45-a2a24d4f08a9" containerName="registry-server" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.851015 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:07 crc kubenswrapper[4800]: I1125 16:37:07.868772 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4qhtb"] Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:07.999978 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-utilities\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.000079 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-catalog-content\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.000108 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grlsj\" (UniqueName: \"kubernetes.io/projected/03946100-a692-485c-a49a-f6ef116f81dd-kube-api-access-grlsj\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.102637 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-catalog-content\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.102703 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grlsj\" (UniqueName: \"kubernetes.io/projected/03946100-a692-485c-a49a-f6ef116f81dd-kube-api-access-grlsj\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.102936 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-utilities\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.103506 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-utilities\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.103719 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-catalog-content\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.125901 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-grlsj\" (UniqueName: \"kubernetes.io/projected/03946100-a692-485c-a49a-f6ef116f81dd-kube-api-access-grlsj\") pod \"certified-operators-4qhtb\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.173547 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:08 crc kubenswrapper[4800]: I1125 16:37:08.913952 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4qhtb"] Nov 25 16:37:09 crc kubenswrapper[4800]: I1125 16:37:09.912049 4800 generic.go:334] "Generic (PLEG): container finished" podID="03946100-a692-485c-a49a-f6ef116f81dd" containerID="64a5516acc5d540ae132177aba71ca3d5a899ba7f13aea45407ca4c09ad16cd3" exitCode=0 Nov 25 16:37:09 crc kubenswrapper[4800]: I1125 16:37:09.912113 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qhtb" event={"ID":"03946100-a692-485c-a49a-f6ef116f81dd","Type":"ContainerDied","Data":"64a5516acc5d540ae132177aba71ca3d5a899ba7f13aea45407ca4c09ad16cd3"} Nov 25 16:37:09 crc kubenswrapper[4800]: I1125 16:37:09.912360 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qhtb" event={"ID":"03946100-a692-485c-a49a-f6ef116f81dd","Type":"ContainerStarted","Data":"041758233ce5413f98c42743884ff63e7dc7f1fda3dd49619e6f388f51bac218"} Nov 25 16:37:09 crc kubenswrapper[4800]: I1125 16:37:09.914791 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:37:10 crc kubenswrapper[4800]: I1125 16:37:10.927750 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qhtb" event={"ID":"03946100-a692-485c-a49a-f6ef116f81dd","Type":"ContainerStarted","Data":"c9297b178768425985cee2309347436889aebe174389db70c35cb42a76da96a7"} Nov 25 16:37:12 crc kubenswrapper[4800]: I1125 16:37:12.948786 4800 generic.go:334] "Generic (PLEG): container finished" podID="03946100-a692-485c-a49a-f6ef116f81dd" containerID="c9297b178768425985cee2309347436889aebe174389db70c35cb42a76da96a7" exitCode=0 Nov 25 16:37:12 crc kubenswrapper[4800]: I1125 16:37:12.948868 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qhtb" event={"ID":"03946100-a692-485c-a49a-f6ef116f81dd","Type":"ContainerDied","Data":"c9297b178768425985cee2309347436889aebe174389db70c35cb42a76da96a7"} Nov 25 16:37:14 crc kubenswrapper[4800]: I1125 16:37:14.970186 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qhtb" event={"ID":"03946100-a692-485c-a49a-f6ef116f81dd","Type":"ContainerStarted","Data":"4470a478d3fa68e8705dd5098f07bcc8febd051721c706e3c878fe4e017ebe8a"} Nov 25 16:37:15 crc kubenswrapper[4800]: I1125 16:37:15.012443 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4qhtb" podStartSLOduration=3.812782528 podStartE2EDuration="8.012419126s" podCreationTimestamp="2025-11-25 16:37:07 +0000 UTC" firstStartedPulling="2025-11-25 16:37:09.914401455 +0000 UTC m=+4790.968809927" lastFinishedPulling="2025-11-25 16:37:14.114038043 +0000 UTC m=+4795.168446525" observedRunningTime="2025-11-25 16:37:15.002415064 +0000 UTC m=+4796.056823546" watchObservedRunningTime="2025-11-25 
Nov 25 16:37:15 crc kubenswrapper[4800]: I1125 16:37:15.789692 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49"
Nov 25 16:37:15 crc kubenswrapper[4800]: E1125 16:37:15.790529 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:37:18 crc kubenswrapper[4800]: I1125 16:37:18.174134 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4qhtb"
Nov 25 16:37:18 crc kubenswrapper[4800]: I1125 16:37:18.175572 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4qhtb"
Nov 25 16:37:18 crc kubenswrapper[4800]: I1125 16:37:18.226047 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4qhtb"
Nov 25 16:37:19 crc kubenswrapper[4800]: I1125 16:37:19.070223 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4qhtb"
Nov 25 16:37:19 crc kubenswrapper[4800]: I1125 16:37:19.126485 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4qhtb"]
Nov 25 16:37:21 crc kubenswrapper[4800]: I1125 16:37:21.034116 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4qhtb" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="registry-server" containerID="cri-o://4470a478d3fa68e8705dd5098f07bcc8febd051721c706e3c878fe4e017ebe8a" gracePeriod=2
Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.046610 4800 generic.go:334] "Generic (PLEG): container finished" podID="03946100-a692-485c-a49a-f6ef116f81dd" containerID="4470a478d3fa68e8705dd5098f07bcc8febd051721c706e3c878fe4e017ebe8a" exitCode=0
Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.046663 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qhtb" event={"ID":"03946100-a692-485c-a49a-f6ef116f81dd","Type":"ContainerDied","Data":"4470a478d3fa68e8705dd5098f07bcc8febd051721c706e3c878fe4e017ebe8a"}
Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.308306 4800 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.391336 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-utilities\") pod \"03946100-a692-485c-a49a-f6ef116f81dd\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.391522 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grlsj\" (UniqueName: \"kubernetes.io/projected/03946100-a692-485c-a49a-f6ef116f81dd-kube-api-access-grlsj\") pod \"03946100-a692-485c-a49a-f6ef116f81dd\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.391562 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-catalog-content\") pod \"03946100-a692-485c-a49a-f6ef116f81dd\" (UID: \"03946100-a692-485c-a49a-f6ef116f81dd\") " Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.392332 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-utilities" (OuterVolumeSpecName: "utilities") pod "03946100-a692-485c-a49a-f6ef116f81dd" (UID: "03946100-a692-485c-a49a-f6ef116f81dd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.402122 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03946100-a692-485c-a49a-f6ef116f81dd-kube-api-access-grlsj" (OuterVolumeSpecName: "kube-api-access-grlsj") pod "03946100-a692-485c-a49a-f6ef116f81dd" (UID: "03946100-a692-485c-a49a-f6ef116f81dd"). InnerVolumeSpecName "kube-api-access-grlsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.452261 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03946100-a692-485c-a49a-f6ef116f81dd" (UID: "03946100-a692-485c-a49a-f6ef116f81dd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.494144 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grlsj\" (UniqueName: \"kubernetes.io/projected/03946100-a692-485c-a49a-f6ef116f81dd-kube-api-access-grlsj\") on node \"crc\" DevicePath \"\"" Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.494667 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:37:22 crc kubenswrapper[4800]: I1125 16:37:22.494685 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03946100-a692-485c-a49a-f6ef116f81dd-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.061791 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qhtb" event={"ID":"03946100-a692-485c-a49a-f6ef116f81dd","Type":"ContainerDied","Data":"041758233ce5413f98c42743884ff63e7dc7f1fda3dd49619e6f388f51bac218"} Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.061904 4800 scope.go:117] "RemoveContainer" containerID="4470a478d3fa68e8705dd5098f07bcc8febd051721c706e3c878fe4e017ebe8a" Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.062568 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4qhtb" Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.105705 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4qhtb"] Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.115870 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4qhtb"] Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.124752 4800 scope.go:117] "RemoveContainer" containerID="c9297b178768425985cee2309347436889aebe174389db70c35cb42a76da96a7" Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.155510 4800 scope.go:117] "RemoveContainer" containerID="64a5516acc5d540ae132177aba71ca3d5a899ba7f13aea45407ca4c09ad16cd3" Nov 25 16:37:23 crc kubenswrapper[4800]: I1125 16:37:23.798451 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03946100-a692-485c-a49a-f6ef116f81dd" path="/var/lib/kubelet/pods/03946100-a692-485c-a49a-f6ef116f81dd/volumes" Nov 25 16:37:27 crc kubenswrapper[4800]: I1125 16:37:27.785748 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:37:27 crc kubenswrapper[4800]: E1125 16:37:27.786897 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:37:39 crc kubenswrapper[4800]: I1125 16:37:39.793919 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:37:39 crc kubenswrapper[4800]: E1125 16:37:39.795067 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:37:53 crc kubenswrapper[4800]: I1125 16:37:53.785942 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49" Nov 25 16:37:54 crc kubenswrapper[4800]: I1125 16:37:54.428619 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"76caf072a2083d316b90a70c9add69381a298e2330b9ebc40c47e5ccffc1f713"} Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.028642 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mrh4l"] Nov 25 16:38:07 crc kubenswrapper[4800]: E1125 16:38:07.030646 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="extract-content" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.030669 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="extract-content" Nov 25 16:38:07 crc kubenswrapper[4800]: E1125 16:38:07.030696 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="registry-server" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.030703 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="registry-server" Nov 25 16:38:07 crc kubenswrapper[4800]: E1125 16:38:07.030725 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="extract-utilities" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.030732 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="extract-utilities" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.031098 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="03946100-a692-485c-a49a-f6ef116f81dd" containerName="registry-server" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.033765 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.069552 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrh4l"] Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.100307 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmt9p\" (UniqueName: \"kubernetes.io/projected/0a01fedc-31ed-4206-953f-07edd134245f-kube-api-access-bmt9p\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.100411 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-catalog-content\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.100445 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-utilities\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.203091 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmt9p\" (UniqueName: \"kubernetes.io/projected/0a01fedc-31ed-4206-953f-07edd134245f-kube-api-access-bmt9p\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.203548 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-catalog-content\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.203618 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-utilities\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.204091 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-catalog-content\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.204182 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-utilities\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.227694 4800 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bmt9p\" (UniqueName: \"kubernetes.io/projected/0a01fedc-31ed-4206-953f-07edd134245f-kube-api-access-bmt9p\") pod \"redhat-marketplace-mrh4l\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:07 crc kubenswrapper[4800]: I1125 16:38:07.367748 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:08 crc kubenswrapper[4800]: I1125 16:38:08.243037 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrh4l"] Nov 25 16:38:08 crc kubenswrapper[4800]: I1125 16:38:08.567589 4800 generic.go:334] "Generic (PLEG): container finished" podID="0a01fedc-31ed-4206-953f-07edd134245f" containerID="b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453" exitCode=0 Nov 25 16:38:08 crc kubenswrapper[4800]: I1125 16:38:08.567944 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrh4l" event={"ID":"0a01fedc-31ed-4206-953f-07edd134245f","Type":"ContainerDied","Data":"b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453"} Nov 25 16:38:08 crc kubenswrapper[4800]: I1125 16:38:08.568029 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrh4l" event={"ID":"0a01fedc-31ed-4206-953f-07edd134245f","Type":"ContainerStarted","Data":"8752d88865a7442d3087794b648f2df718b8a9ac7b7b155959416b4d1473b30c"} Nov 25 16:38:08 crc kubenswrapper[4800]: E1125 16:38:08.691835 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a01fedc_31ed_4206_953f_07edd134245f.slice/crio-b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a01fedc_31ed_4206_953f_07edd134245f.slice/crio-conmon-b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453.scope\": RecentStats: unable to find data in memory cache]" Nov 25 16:38:09 crc kubenswrapper[4800]: I1125 16:38:09.585212 4800 generic.go:334] "Generic (PLEG): container finished" podID="0a01fedc-31ed-4206-953f-07edd134245f" containerID="06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4" exitCode=0 Nov 25 16:38:09 crc kubenswrapper[4800]: I1125 16:38:09.585939 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrh4l" event={"ID":"0a01fedc-31ed-4206-953f-07edd134245f","Type":"ContainerDied","Data":"06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4"} Nov 25 16:38:10 crc kubenswrapper[4800]: I1125 16:38:10.599817 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrh4l" event={"ID":"0a01fedc-31ed-4206-953f-07edd134245f","Type":"ContainerStarted","Data":"526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9"} Nov 25 16:38:17 crc kubenswrapper[4800]: I1125 16:38:17.368453 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:17 crc kubenswrapper[4800]: I1125 16:38:17.369204 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:17 crc kubenswrapper[4800]: I1125 16:38:17.443729 4800 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:17 crc kubenswrapper[4800]: I1125 16:38:17.467273 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mrh4l" podStartSLOduration=9.839270717 podStartE2EDuration="11.467255536s" podCreationTimestamp="2025-11-25 16:38:06 +0000 UTC" firstStartedPulling="2025-11-25 16:38:08.570174294 +0000 UTC m=+4849.624582776" lastFinishedPulling="2025-11-25 16:38:10.198159103 +0000 UTC m=+4851.252567595" observedRunningTime="2025-11-25 16:38:10.635183434 +0000 UTC m=+4851.689591926" watchObservedRunningTime="2025-11-25 16:38:17.467255536 +0000 UTC m=+4858.521664018" Nov 25 16:38:17 crc kubenswrapper[4800]: I1125 16:38:17.717972 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:17 crc kubenswrapper[4800]: I1125 16:38:17.771831 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrh4l"] Nov 25 16:38:19 crc kubenswrapper[4800]: I1125 16:38:19.681420 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mrh4l" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="registry-server" containerID="cri-o://526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9" gracePeriod=2 Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.371298 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.416210 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-utilities\") pod \"0a01fedc-31ed-4206-953f-07edd134245f\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.416297 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmt9p\" (UniqueName: \"kubernetes.io/projected/0a01fedc-31ed-4206-953f-07edd134245f-kube-api-access-bmt9p\") pod \"0a01fedc-31ed-4206-953f-07edd134245f\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.416554 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-catalog-content\") pod \"0a01fedc-31ed-4206-953f-07edd134245f\" (UID: \"0a01fedc-31ed-4206-953f-07edd134245f\") " Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.417415 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-utilities" (OuterVolumeSpecName: "utilities") pod "0a01fedc-31ed-4206-953f-07edd134245f" (UID: "0a01fedc-31ed-4206-953f-07edd134245f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.425439 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a01fedc-31ed-4206-953f-07edd134245f-kube-api-access-bmt9p" (OuterVolumeSpecName: "kube-api-access-bmt9p") pod "0a01fedc-31ed-4206-953f-07edd134245f" (UID: "0a01fedc-31ed-4206-953f-07edd134245f"). 
InnerVolumeSpecName "kube-api-access-bmt9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.434303 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a01fedc-31ed-4206-953f-07edd134245f" (UID: "0a01fedc-31ed-4206-953f-07edd134245f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.519190 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmt9p\" (UniqueName: \"kubernetes.io/projected/0a01fedc-31ed-4206-953f-07edd134245f-kube-api-access-bmt9p\") on node \"crc\" DevicePath \"\"" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.519245 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.519264 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a01fedc-31ed-4206-953f-07edd134245f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.695306 4800 generic.go:334] "Generic (PLEG): container finished" podID="0a01fedc-31ed-4206-953f-07edd134245f" containerID="526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9" exitCode=0 Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.695398 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrh4l" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.695397 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrh4l" event={"ID":"0a01fedc-31ed-4206-953f-07edd134245f","Type":"ContainerDied","Data":"526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9"} Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.695534 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrh4l" event={"ID":"0a01fedc-31ed-4206-953f-07edd134245f","Type":"ContainerDied","Data":"8752d88865a7442d3087794b648f2df718b8a9ac7b7b155959416b4d1473b30c"} Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.695567 4800 scope.go:117] "RemoveContainer" containerID="526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.726887 4800 scope.go:117] "RemoveContainer" containerID="06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.733081 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrh4l"] Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.745317 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrh4l"] Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.757556 4800 scope.go:117] "RemoveContainer" containerID="b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.818132 4800 scope.go:117] "RemoveContainer" containerID="526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9" Nov 25 16:38:20 crc kubenswrapper[4800]: 
E1125 16:38:20.818550 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9\": container with ID starting with 526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9 not found: ID does not exist" containerID="526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.818587 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9"} err="failed to get container status \"526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9\": rpc error: code = NotFound desc = could not find container \"526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9\": container with ID starting with 526baba5cf16c50c54400701779d038eec17ef7b0e5c7508dc1271aeb2ef24b9 not found: ID does not exist" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.818615 4800 scope.go:117] "RemoveContainer" containerID="06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4" Nov 25 16:38:20 crc kubenswrapper[4800]: E1125 16:38:20.819204 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4\": container with ID starting with 06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4 not found: ID does not exist" containerID="06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.819240 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4"} err="failed to get container status \"06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4\": rpc error: code = NotFound desc = could not find container \"06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4\": container with ID starting with 06bff1b41dde419ef48c67f96bf43a9795e4eb28a7a7a4942b87c74a81f0ccd4 not found: ID does not exist" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.819260 4800 scope.go:117] "RemoveContainer" containerID="b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453" Nov 25 16:38:20 crc kubenswrapper[4800]: E1125 16:38:20.819632 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453\": container with ID starting with b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453 not found: ID does not exist" containerID="b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453" Nov 25 16:38:20 crc kubenswrapper[4800]: I1125 16:38:20.819659 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453"} err="failed to get container status \"b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453\": rpc error: code = NotFound desc = could not find container \"b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453\": container with ID starting with b0056972f39ed87982666dad6b4a114c65a0f7c44480553f3d9a90d3f8f76453 not found: ID does not exist" Nov 25 16:38:21 crc kubenswrapper[4800]: I1125 16:38:21.795793 
Nov 25 16:40:12 crc kubenswrapper[4800]: I1125 16:40:12.640044 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:40:12 crc kubenswrapper[4800]: I1125 16:40:12.640980 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:40:42 crc kubenswrapper[4800]: I1125 16:40:42.639547 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:40:42 crc kubenswrapper[4800]: I1125 16:40:42.640202 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:41:05 crc kubenswrapper[4800]: I1125 16:41:05.982227 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bl8n4"]
Nov 25 16:41:05 crc kubenswrapper[4800]: E1125 16:41:05.983519 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="registry-server"
Nov 25 16:41:05 crc kubenswrapper[4800]: I1125 16:41:05.983538 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="registry-server"
Nov 25 16:41:05 crc kubenswrapper[4800]: E1125 16:41:05.983585 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="extract-utilities"
Nov 25 16:41:05 crc kubenswrapper[4800]: I1125 16:41:05.983592 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="extract-utilities"
Nov 25 16:41:05 crc kubenswrapper[4800]: E1125 16:41:05.983603 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="extract-content"
Nov 25 16:41:05 crc kubenswrapper[4800]: I1125 16:41:05.983610 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="extract-content"
Nov 25 16:41:05 crc kubenswrapper[4800]: I1125 16:41:05.984071 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a01fedc-31ed-4206-953f-07edd134245f" containerName="registry-server"
Nov 25 16:41:05 crc kubenswrapper[4800]: I1125 16:41:05.987261 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.010692 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bl8n4"]
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.068450 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-catalog-content\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.068532 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-utilities\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.068711 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prgql\" (UniqueName: \"kubernetes.io/projected/f4487391-49b1-468e-a1d8-7f43ed8dc72a-kube-api-access-prgql\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.170445 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-catalog-content\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.170526 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-utilities\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.170679 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prgql\" (UniqueName: \"kubernetes.io/projected/f4487391-49b1-468e-a1d8-7f43ed8dc72a-kube-api-access-prgql\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.171208 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-utilities\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.173234 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-catalog-content\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.200504 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prgql\" (UniqueName: \"kubernetes.io/projected/f4487391-49b1-468e-a1d8-7f43ed8dc72a-kube-api-access-prgql\") pod \"community-operators-bl8n4\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") " pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.311148 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:06 crc kubenswrapper[4800]: I1125 16:41:06.895021 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bl8n4"]
Nov 25 16:41:07 crc kubenswrapper[4800]: I1125 16:41:07.330890 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerID="085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530" exitCode=0
Nov 25 16:41:07 crc kubenswrapper[4800]: I1125 16:41:07.330982 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bl8n4" event={"ID":"f4487391-49b1-468e-a1d8-7f43ed8dc72a","Type":"ContainerDied","Data":"085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530"}
Nov 25 16:41:07 crc kubenswrapper[4800]: I1125 16:41:07.331443 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bl8n4" event={"ID":"f4487391-49b1-468e-a1d8-7f43ed8dc72a","Type":"ContainerStarted","Data":"de75c22057b2a4fff66c595a35f1523e56972546a7e460adbe670113a28eaf39"}
Nov 25 16:41:08 crc kubenswrapper[4800]: I1125 16:41:08.345339 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bl8n4" event={"ID":"f4487391-49b1-468e-a1d8-7f43ed8dc72a","Type":"ContainerStarted","Data":"2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e"}
Nov 25 16:41:08 crc kubenswrapper[4800]: I1125 16:41:08.967306 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nkngm"]
Nov 25 16:41:08 crc kubenswrapper[4800]: I1125 16:41:08.969967 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:08 crc kubenswrapper[4800]: I1125 16:41:08.999433 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nkngm"]
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.044527 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-utilities\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.044589 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fshbq\" (UniqueName: \"kubernetes.io/projected/ce983dc5-5acc-48e3-ada7-16e32a405832-kube-api-access-fshbq\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.044784 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-catalog-content\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.147793 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-catalog-content\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.148223 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-utilities\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.148255 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fshbq\" (UniqueName: \"kubernetes.io/projected/ce983dc5-5acc-48e3-ada7-16e32a405832-kube-api-access-fshbq\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.148770 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-utilities\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.149240 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-catalog-content\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.172273 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fshbq\" (UniqueName: \"kubernetes.io/projected/ce983dc5-5acc-48e3-ada7-16e32a405832-kube-api-access-fshbq\") pod \"redhat-operators-nkngm\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.301080 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nkngm"
Nov 25 16:41:09 crc kubenswrapper[4800]: W1125 16:41:09.885488 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce983dc5_5acc_48e3_ada7_16e32a405832.slice/crio-632d17299fe2240497cab141641160a392af7e61be475826499d04a0252cbbc0 WatchSource:0}: Error finding container 632d17299fe2240497cab141641160a392af7e61be475826499d04a0252cbbc0: Status 404 returned error can't find the container with id 632d17299fe2240497cab141641160a392af7e61be475826499d04a0252cbbc0
Nov 25 16:41:09 crc kubenswrapper[4800]: I1125 16:41:09.888409 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nkngm"]
Nov 25 16:41:10 crc kubenswrapper[4800]: I1125 16:41:10.377257 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nkngm" event={"ID":"ce983dc5-5acc-48e3-ada7-16e32a405832","Type":"ContainerStarted","Data":"632d17299fe2240497cab141641160a392af7e61be475826499d04a0252cbbc0"}
Nov 25 16:41:11 crc kubenswrapper[4800]: I1125 16:41:11.394666 4800 generic.go:334] "Generic (PLEG): container finished" podID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerID="732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861" exitCode=0
Nov 25 16:41:11 crc kubenswrapper[4800]: I1125 16:41:11.395402 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nkngm" event={"ID":"ce983dc5-5acc-48e3-ada7-16e32a405832","Type":"ContainerDied","Data":"732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861"}
Nov 25 16:41:11 crc kubenswrapper[4800]: I1125 16:41:11.405229 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerID="2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e" exitCode=0
Nov 25 16:41:11 crc kubenswrapper[4800]: I1125 16:41:11.405293 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bl8n4" event={"ID":"f4487391-49b1-468e-a1d8-7f43ed8dc72a","Type":"ContainerDied","Data":"2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e"}
Nov 25 16:41:12 crc kubenswrapper[4800]: I1125 16:41:12.425888 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bl8n4" event={"ID":"f4487391-49b1-468e-a1d8-7f43ed8dc72a","Type":"ContainerStarted","Data":"1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b"}
Nov 25 16:41:12 crc kubenswrapper[4800]: I1125 16:41:12.451272 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bl8n4" podStartSLOduration=2.921250371 podStartE2EDuration="7.451252353s" podCreationTimestamp="2025-11-25 16:41:05 +0000 UTC" firstStartedPulling="2025-11-25 16:41:07.333504083 +0000 UTC m=+5028.387912565" lastFinishedPulling="2025-11-25 16:41:11.863506065 +0000 UTC m=+5032.917914547" observedRunningTime="2025-11-25 16:41:12.4456395 +0000 UTC m=+5033.500047992" watchObservedRunningTime="2025-11-25 16:41:12.451252353 +0000 UTC m=+5033.505660835"
Nov 25 16:41:12 crc kubenswrapper[4800]: I1125 16:41:12.640953 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:41:12 crc kubenswrapper[4800]: I1125 16:41:12.641060 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:41:12 crc kubenswrapper[4800]: I1125 16:41:12.641136 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 16:41:12 crc kubenswrapper[4800]: I1125 16:41:12.642626 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"76caf072a2083d316b90a70c9add69381a298e2330b9ebc40c47e5ccffc1f713"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 16:41:12 crc kubenswrapper[4800]: I1125 16:41:12.642713 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://76caf072a2083d316b90a70c9add69381a298e2330b9ebc40c47e5ccffc1f713" gracePeriod=600
Nov 25 16:41:13 crc kubenswrapper[4800]: I1125 16:41:13.438195 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="76caf072a2083d316b90a70c9add69381a298e2330b9ebc40c47e5ccffc1f713" exitCode=0
Nov 25 16:41:13 crc kubenswrapper[4800]: I1125 16:41:13.438249 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"76caf072a2083d316b90a70c9add69381a298e2330b9ebc40c47e5ccffc1f713"}
Nov 25 16:41:13 crc kubenswrapper[4800]: I1125 16:41:13.438296 4800 scope.go:117] "RemoveContainer" containerID="1acbce0a0776c94322dc4b1017c67a73b28fa412754b0ec70ddc6d8d24f83c49"
Nov 25 16:41:14 crc kubenswrapper[4800]: I1125 16:41:14.454097 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f"}
Nov 25 16:41:14 crc kubenswrapper[4800]: I1125 16:41:14.456380 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nkngm" event={"ID":"ce983dc5-5acc-48e3-ada7-16e32a405832","Type":"ContainerStarted","Data":"47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c"}
Nov 25 16:41:16 crc kubenswrapper[4800]: I1125 16:41:16.312384 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:16 crc kubenswrapper[4800]: I1125 16:41:16.313426 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bl8n4"
probe="readiness" status="" pod="openshift-marketplace/community-operators-bl8n4" Nov 25 16:41:17 crc kubenswrapper[4800]: I1125 16:41:17.370159 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-bl8n4" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="registry-server" probeResult="failure" output=< Nov 25 16:41:17 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:41:17 crc kubenswrapper[4800]: > Nov 25 16:41:23 crc kubenswrapper[4800]: I1125 16:41:23.567196 4800 generic.go:334] "Generic (PLEG): container finished" podID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerID="47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c" exitCode=0 Nov 25 16:41:23 crc kubenswrapper[4800]: I1125 16:41:23.567394 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nkngm" event={"ID":"ce983dc5-5acc-48e3-ada7-16e32a405832","Type":"ContainerDied","Data":"47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c"} Nov 25 16:41:24 crc kubenswrapper[4800]: I1125 16:41:24.590070 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nkngm" event={"ID":"ce983dc5-5acc-48e3-ada7-16e32a405832","Type":"ContainerStarted","Data":"c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2"} Nov 25 16:41:27 crc kubenswrapper[4800]: I1125 16:41:27.617158 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-bl8n4" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="registry-server" probeResult="failure" output=< Nov 25 16:41:27 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:41:27 crc kubenswrapper[4800]: > Nov 25 16:41:29 crc kubenswrapper[4800]: I1125 16:41:29.302048 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nkngm" Nov 25 16:41:29 crc kubenswrapper[4800]: I1125 16:41:29.302333 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nkngm" Nov 25 16:41:30 crc kubenswrapper[4800]: I1125 16:41:30.359574 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nkngm" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="registry-server" probeResult="failure" output=< Nov 25 16:41:30 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:41:30 crc kubenswrapper[4800]: > Nov 25 16:41:37 crc kubenswrapper[4800]: I1125 16:41:37.360546 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-bl8n4" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="registry-server" probeResult="failure" output=< Nov 25 16:41:37 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:41:37 crc kubenswrapper[4800]: > Nov 25 16:41:39 crc kubenswrapper[4800]: I1125 16:41:39.356122 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nkngm" Nov 25 16:41:39 crc kubenswrapper[4800]: I1125 16:41:39.377040 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nkngm" podStartSLOduration=18.687731233 podStartE2EDuration="31.377022785s" podCreationTimestamp="2025-11-25 16:41:08 +0000 UTC" firstStartedPulling="2025-11-25 
16:41:11.397823793 +0000 UTC m=+5032.452232275" lastFinishedPulling="2025-11-25 16:41:24.087115345 +0000 UTC m=+5045.141523827" observedRunningTime="2025-11-25 16:41:24.618784315 +0000 UTC m=+5045.673192817" watchObservedRunningTime="2025-11-25 16:41:39.377022785 +0000 UTC m=+5060.431431257" Nov 25 16:41:39 crc kubenswrapper[4800]: I1125 16:41:39.407013 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nkngm" Nov 25 16:41:40 crc kubenswrapper[4800]: I1125 16:41:40.170732 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nkngm"] Nov 25 16:41:40 crc kubenswrapper[4800]: I1125 16:41:40.760419 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nkngm" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="registry-server" containerID="cri-o://c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2" gracePeriod=2 Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.526470 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nkngm" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.626045 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fshbq\" (UniqueName: \"kubernetes.io/projected/ce983dc5-5acc-48e3-ada7-16e32a405832-kube-api-access-fshbq\") pod \"ce983dc5-5acc-48e3-ada7-16e32a405832\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.626102 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-catalog-content\") pod \"ce983dc5-5acc-48e3-ada7-16e32a405832\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.626253 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-utilities\") pod \"ce983dc5-5acc-48e3-ada7-16e32a405832\" (UID: \"ce983dc5-5acc-48e3-ada7-16e32a405832\") " Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.627048 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-utilities" (OuterVolumeSpecName: "utilities") pod "ce983dc5-5acc-48e3-ada7-16e32a405832" (UID: "ce983dc5-5acc-48e3-ada7-16e32a405832"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.635115 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce983dc5-5acc-48e3-ada7-16e32a405832-kube-api-access-fshbq" (OuterVolumeSpecName: "kube-api-access-fshbq") pod "ce983dc5-5acc-48e3-ada7-16e32a405832" (UID: "ce983dc5-5acc-48e3-ada7-16e32a405832"). InnerVolumeSpecName "kube-api-access-fshbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.726460 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce983dc5-5acc-48e3-ada7-16e32a405832" (UID: "ce983dc5-5acc-48e3-ada7-16e32a405832"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.728401 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fshbq\" (UniqueName: \"kubernetes.io/projected/ce983dc5-5acc-48e3-ada7-16e32a405832-kube-api-access-fshbq\") on node \"crc\" DevicePath \"\"" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.728450 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.728463 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce983dc5-5acc-48e3-ada7-16e32a405832-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.771010 4800 generic.go:334] "Generic (PLEG): container finished" podID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerID="c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2" exitCode=0 Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.771071 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nkngm" event={"ID":"ce983dc5-5acc-48e3-ada7-16e32a405832","Type":"ContainerDied","Data":"c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2"} Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.771101 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nkngm" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.771111 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nkngm" event={"ID":"ce983dc5-5acc-48e3-ada7-16e32a405832","Type":"ContainerDied","Data":"632d17299fe2240497cab141641160a392af7e61be475826499d04a0252cbbc0"} Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.771133 4800 scope.go:117] "RemoveContainer" containerID="c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.799338 4800 scope.go:117] "RemoveContainer" containerID="47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.822234 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nkngm"] Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.832693 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nkngm"] Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.870772 4800 scope.go:117] "RemoveContainer" containerID="732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861" Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.899791 4800 scope.go:117] "RemoveContainer" containerID="c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2" Nov 25 16:41:41 crc kubenswrapper[4800]: E1125 16:41:41.900290 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2\": container with ID starting with c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2 not found: ID does not exist" containerID="c448ca088e6fcdac004c04b632c061445e6a6f4ccf82d8b12fae7de95a538ba2" Nov 25 16:41:41 crc kubenswrapper[4800]: 
Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.900353 4800 scope.go:117] "RemoveContainer" containerID="47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c"
Nov 25 16:41:41 crc kubenswrapper[4800]: E1125 16:41:41.900637 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c\": container with ID starting with 47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c not found: ID does not exist" containerID="47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c"
Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.900672 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c"} err="failed to get container status \"47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c\": rpc error: code = NotFound desc = could not find container \"47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c\": container with ID starting with 47681e6be99600bd50511e0561db1a0a6baae069ba2cfec3afb671d4f8f7136c not found: ID does not exist"
Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.900692 4800 scope.go:117] "RemoveContainer" containerID="732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861"
Nov 25 16:41:41 crc kubenswrapper[4800]: E1125 16:41:41.900942 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861\": container with ID starting with 732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861 not found: ID does not exist" containerID="732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861"
Nov 25 16:41:41 crc kubenswrapper[4800]: I1125 16:41:41.900978 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861"} err="failed to get container status \"732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861\": rpc error: code = NotFound desc = could not find container \"732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861\": container with ID starting with 732aa784a066784b8c3a5f611c879f3576948f7b3f54b3d929d43c2b3a72a861 not found: ID does not exist"
Nov 25 16:41:43 crc kubenswrapper[4800]: I1125 16:41:43.802732 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" path="/var/lib/kubelet/pods/ce983dc5-5acc-48e3-ada7-16e32a405832/volumes"
Nov 25 16:41:46 crc kubenswrapper[4800]: I1125 16:41:46.365262 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:46 crc kubenswrapper[4800]: I1125 16:41:46.418446 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:46 crc kubenswrapper[4800]: I1125 16:41:46.612545 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bl8n4"]
Nov 25 16:41:47 crc kubenswrapper[4800]: I1125 16:41:47.835063 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bl8n4" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="registry-server" containerID="cri-o://1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b" gracePeriod=2
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.524694 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.592807 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-utilities\") pod \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") "
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.593313 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-catalog-content\") pod \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") "
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.593468 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prgql\" (UniqueName: \"kubernetes.io/projected/f4487391-49b1-468e-a1d8-7f43ed8dc72a-kube-api-access-prgql\") pod \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\" (UID: \"f4487391-49b1-468e-a1d8-7f43ed8dc72a\") "
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.593817 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-utilities" (OuterVolumeSpecName: "utilities") pod "f4487391-49b1-468e-a1d8-7f43ed8dc72a" (UID: "f4487391-49b1-468e-a1d8-7f43ed8dc72a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.594615 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.600302 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4487391-49b1-468e-a1d8-7f43ed8dc72a-kube-api-access-prgql" (OuterVolumeSpecName: "kube-api-access-prgql") pod "f4487391-49b1-468e-a1d8-7f43ed8dc72a" (UID: "f4487391-49b1-468e-a1d8-7f43ed8dc72a"). InnerVolumeSpecName "kube-api-access-prgql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.657716 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4487391-49b1-468e-a1d8-7f43ed8dc72a" (UID: "f4487391-49b1-468e-a1d8-7f43ed8dc72a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.697036 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4487391-49b1-468e-a1d8-7f43ed8dc72a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.697338 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prgql\" (UniqueName: \"kubernetes.io/projected/f4487391-49b1-468e-a1d8-7f43ed8dc72a-kube-api-access-prgql\") on node \"crc\" DevicePath \"\""
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.847583 4800 generic.go:334] "Generic (PLEG): container finished" podID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerID="1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b" exitCode=0
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.847625 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bl8n4" event={"ID":"f4487391-49b1-468e-a1d8-7f43ed8dc72a","Type":"ContainerDied","Data":"1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b"}
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.847656 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bl8n4" event={"ID":"f4487391-49b1-468e-a1d8-7f43ed8dc72a","Type":"ContainerDied","Data":"de75c22057b2a4fff66c595a35f1523e56972546a7e460adbe670113a28eaf39"}
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.847675 4800 scope.go:117] "RemoveContainer" containerID="1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.847820 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bl8n4"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.895746 4800 scope.go:117] "RemoveContainer" containerID="2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.900588 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bl8n4"]
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.919328 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bl8n4"]
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.929883 4800 scope.go:117] "RemoveContainer" containerID="085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.987747 4800 scope.go:117] "RemoveContainer" containerID="1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b"
Nov 25 16:41:48 crc kubenswrapper[4800]: E1125 16:41:48.990876 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b\": container with ID starting with 1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b not found: ID does not exist" containerID="1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.991512 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b"} err="failed to get container status \"1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b\": rpc error: code = NotFound desc = could not find container \"1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b\": container with ID starting with 1777b15adc32b15157c94c012bafdc43cf05b41ede7aa5eff8c6a6b63914d27b not found: ID does not exist"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.991544 4800 scope.go:117] "RemoveContainer" containerID="2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e"
Nov 25 16:41:48 crc kubenswrapper[4800]: E1125 16:41:48.992121 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e\": container with ID starting with 2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e not found: ID does not exist" containerID="2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.992145 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e"} err="failed to get container status \"2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e\": rpc error: code = NotFound desc = could not find container \"2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e\": container with ID starting with 2f8ff9cffd3a1e857ff2d18928e8e0ecbb605e09a59df253095ed73359178d1e not found: ID does not exist"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.992161 4800 scope.go:117] "RemoveContainer" containerID="085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530"
Nov 25 16:41:48 crc kubenswrapper[4800]: E1125 16:41:48.998086 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530\": container with ID starting with 085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530 not found: ID does not exist" containerID="085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530"
Nov 25 16:41:48 crc kubenswrapper[4800]: I1125 16:41:48.998182 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530"} err="failed to get container status \"085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530\": rpc error: code = NotFound desc = could not find container \"085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530\": container with ID starting with 085d5b87b4d51120bb5487fe6c2524ff03fb1db4420a52f32c54a5cad7cf2530 not found: ID does not exist"
Nov 25 16:41:49 crc kubenswrapper[4800]: I1125 16:41:49.799129 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" path="/var/lib/kubelet/pods/f4487391-49b1-468e-a1d8-7f43ed8dc72a/volumes"
Nov 25 16:43:42 crc kubenswrapper[4800]: I1125 16:43:42.640490 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:43:42 crc kubenswrapper[4800]: I1125 16:43:42.641057 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:44:12 crc kubenswrapper[4800]: I1125 16:44:12.639620 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:44:12 crc kubenswrapper[4800]: I1125 16:44:12.640064 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.640333 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.640883 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.640937 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.641738 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.641783 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" gracePeriod=600
Nov 25 16:44:42 crc kubenswrapper[4800]: E1125 16:44:42.820798 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.933725 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" exitCode=0
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.933790 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f"}
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.933861 4800 scope.go:117] "RemoveContainer" containerID="76caf072a2083d316b90a70c9add69381a298e2330b9ebc40c47e5ccffc1f713"
Nov 25 16:44:42 crc kubenswrapper[4800]: I1125 16:44:42.934568 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f"
Nov 25 16:44:42 crc kubenswrapper[4800]: E1125 16:44:42.934903 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:44:57 crc kubenswrapper[4800]: I1125 16:44:57.786462 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f"
Nov 25 16:44:57 crc kubenswrapper[4800]: E1125 16:44:57.787265 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.146014 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"]
Nov 25 16:45:00 crc kubenswrapper[4800]: E1125 16:45:00.146892 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="extract-utilities"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.146909 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="extract-utilities"
Nov 25 16:45:00 crc kubenswrapper[4800]: E1125 16:45:00.146937 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="extract-content"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.146945 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="extract-content"
Nov 25 16:45:00 crc kubenswrapper[4800]: E1125 16:45:00.146967 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="extract-utilities"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.146974 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="extract-utilities"
Nov 25 16:45:00 crc kubenswrapper[4800]: E1125 16:45:00.147000 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="registry-server"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.147007 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="registry-server"
Nov 25 16:45:00 crc kubenswrapper[4800]: E1125 16:45:00.147027 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="registry-server"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.147035 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="registry-server"
Nov 25 16:45:00 crc kubenswrapper[4800]: E1125 16:45:00.147045 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="extract-content"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.147052 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="extract-content"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.147254 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce983dc5-5acc-48e3-ada7-16e32a405832" containerName="registry-server"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.147280 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4487391-49b1-468e-a1d8-7f43ed8dc72a" containerName="registry-server"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.147916 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.149870 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.150145 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.163170 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"]
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.322138 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-secret-volume\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.322201 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-config-volume\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.322666 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69nlx\" (UniqueName: \"kubernetes.io/projected/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-kube-api-access-69nlx\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.424449 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69nlx\" (UniqueName: \"kubernetes.io/projected/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-kube-api-access-69nlx\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.424551 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-secret-volume\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.424588 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-config-volume\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.425391 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-config-volume\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.432030 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-secret-volume\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.439996 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69nlx\" (UniqueName: \"kubernetes.io/projected/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-kube-api-access-69nlx\") pod \"collect-profiles-29401485-mf2zs\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.467712 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:00 crc kubenswrapper[4800]: I1125 16:45:00.929717 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"]
Nov 25 16:45:01 crc kubenswrapper[4800]: I1125 16:45:01.130588 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs" event={"ID":"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb","Type":"ContainerStarted","Data":"8a3b5ca151f7af7eef5492ea95a517aa74e319c747f3e12ec0a04d6ae1eaa1ac"}
Nov 25 16:45:02 crc kubenswrapper[4800]: I1125 16:45:02.143617 4800 generic.go:334] "Generic (PLEG): container finished" podID="5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" containerID="0728ddfc2512de118284fd4476c1e4580352c0843d7da90b1dd04cd156625594" exitCode=0
Nov 25 16:45:02 crc kubenswrapper[4800]: I1125 16:45:02.143741 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs" event={"ID":"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb","Type":"ContainerDied","Data":"0728ddfc2512de118284fd4476c1e4580352c0843d7da90b1dd04cd156625594"}
Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.638596 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"
Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.808527 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-config-volume\") pod \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") "
Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.808585 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69nlx\" (UniqueName: \"kubernetes.io/projected/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-kube-api-access-69nlx\") pod \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") "
Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.808623 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-secret-volume\") pod \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\" (UID: \"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb\") "
Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.810630 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" (UID: "5a33414b-786b-4b41-a5f2-3ec4fa3df4cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.831046 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-kube-api-access-69nlx" (OuterVolumeSpecName: "kube-api-access-69nlx") pod "5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" (UID: "5a33414b-786b-4b41-a5f2-3ec4fa3df4cb"). InnerVolumeSpecName "kube-api-access-69nlx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.851029 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" (UID: "5a33414b-786b-4b41-a5f2-3ec4fa3df4cb"). InnerVolumeSpecName "secret-volume".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.911535 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.911575 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69nlx\" (UniqueName: \"kubernetes.io/projected/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-kube-api-access-69nlx\") on node \"crc\" DevicePath \"\"" Nov 25 16:45:03 crc kubenswrapper[4800]: I1125 16:45:03.911584 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:45:04 crc kubenswrapper[4800]: I1125 16:45:04.161049 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs" event={"ID":"5a33414b-786b-4b41-a5f2-3ec4fa3df4cb","Type":"ContainerDied","Data":"8a3b5ca151f7af7eef5492ea95a517aa74e319c747f3e12ec0a04d6ae1eaa1ac"} Nov 25 16:45:04 crc kubenswrapper[4800]: I1125 16:45:04.161100 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a3b5ca151f7af7eef5492ea95a517aa74e319c747f3e12ec0a04d6ae1eaa1ac" Nov 25 16:45:04 crc kubenswrapper[4800]: I1125 16:45:04.161105 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs" Nov 25 16:45:04 crc kubenswrapper[4800]: I1125 16:45:04.721357 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr"] Nov 25 16:45:04 crc kubenswrapper[4800]: I1125 16:45:04.732036 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401440-sp8hr"] Nov 25 16:45:05 crc kubenswrapper[4800]: I1125 16:45:05.798200 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c852c73-a4a0-470b-a46d-98a1d7408f72" path="/var/lib/kubelet/pods/1c852c73-a4a0-470b-a46d-98a1d7408f72/volumes" Nov 25 16:45:11 crc kubenswrapper[4800]: I1125 16:45:11.785713 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:45:11 crc kubenswrapper[4800]: E1125 16:45:11.786515 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:45:23 crc kubenswrapper[4800]: I1125 16:45:23.786262 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:45:23 crc kubenswrapper[4800]: E1125 16:45:23.787730 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:45:32 crc kubenswrapper[4800]: I1125 16:45:32.231709 4800 scope.go:117] "RemoveContainer" containerID="7f1ca107fa3933a5b37cfdb5e8881fa2f417ec2192865101a38d03cf3eb1a671" Nov 25 16:45:36 crc kubenswrapper[4800]: I1125 16:45:36.785341 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:45:36 crc kubenswrapper[4800]: E1125 16:45:36.786111 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:45:48 crc kubenswrapper[4800]: I1125 16:45:48.785920 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:45:48 crc kubenswrapper[4800]: E1125 16:45:48.786723 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:46:02 crc kubenswrapper[4800]: I1125 16:46:02.785748 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:46:02 crc kubenswrapper[4800]: E1125 16:46:02.786732 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:46:16 crc kubenswrapper[4800]: I1125 16:46:16.786719 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:46:16 crc kubenswrapper[4800]: E1125 16:46:16.787540 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:46:30 crc kubenswrapper[4800]: I1125 16:46:30.785999 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:46:30 crc kubenswrapper[4800]: E1125 16:46:30.787110 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:46:42 crc kubenswrapper[4800]: I1125 16:46:42.785708 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:46:42 crc kubenswrapper[4800]: E1125 16:46:42.786955 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:46:54 crc kubenswrapper[4800]: I1125 16:46:54.785657 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:46:54 crc kubenswrapper[4800]: E1125 16:46:54.786746 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:47:09 crc kubenswrapper[4800]: I1125 16:47:09.802733 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:47:09 crc kubenswrapper[4800]: E1125 16:47:09.803610 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:47:23 crc kubenswrapper[4800]: I1125 16:47:23.786638 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:47:23 crc kubenswrapper[4800]: E1125 16:47:23.788105 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:47:34 crc kubenswrapper[4800]: I1125 16:47:34.785481 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:47:34 crc kubenswrapper[4800]: E1125 16:47:34.786299 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:47:45 crc kubenswrapper[4800]: I1125 16:47:45.786453 4800 
scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:47:45 crc kubenswrapper[4800]: E1125 16:47:45.787782 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:47:59 crc kubenswrapper[4800]: I1125 16:47:59.791894 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:47:59 crc kubenswrapper[4800]: E1125 16:47:59.792689 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:48:10 crc kubenswrapper[4800]: I1125 16:48:10.786513 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:48:10 crc kubenswrapper[4800]: E1125 16:48:10.787353 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:48:21 crc kubenswrapper[4800]: I1125 16:48:21.785556 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:48:21 crc kubenswrapper[4800]: E1125 16:48:21.786793 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.202030 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vsvqq"] Nov 25 16:48:23 crc kubenswrapper[4800]: E1125 16:48:23.202780 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" containerName="collect-profiles" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.202796 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" containerName="collect-profiles" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.203026 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" containerName="collect-profiles" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.204938 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.223102 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vsvqq"] Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.262142 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kghf6\" (UniqueName: \"kubernetes.io/projected/24102a24-f1b0-4b69-b64c-b26e276abe61-kube-api-access-kghf6\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.262194 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-catalog-content\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.262324 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-utilities\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.364087 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kghf6\" (UniqueName: \"kubernetes.io/projected/24102a24-f1b0-4b69-b64c-b26e276abe61-kube-api-access-kghf6\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.364584 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-catalog-content\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.365203 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-catalog-content\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.365584 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-utilities\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.365907 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-utilities\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.387540 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kghf6\" (UniqueName: \"kubernetes.io/projected/24102a24-f1b0-4b69-b64c-b26e276abe61-kube-api-access-kghf6\") pod \"certified-operators-vsvqq\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:23 crc kubenswrapper[4800]: I1125 16:48:23.522626 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:24 crc kubenswrapper[4800]: I1125 16:48:24.087039 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vsvqq"] Nov 25 16:48:24 crc kubenswrapper[4800]: E1125 16:48:24.505765 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24102a24_f1b0_4b69_b64c_b26e276abe61.slice/crio-conmon-79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24102a24_f1b0_4b69_b64c_b26e276abe61.slice/crio-79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b.scope\": RecentStats: unable to find data in memory cache]" Nov 25 16:48:24 crc kubenswrapper[4800]: I1125 16:48:24.957564 4800 generic.go:334] "Generic (PLEG): container finished" podID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerID="79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b" exitCode=0 Nov 25 16:48:24 crc kubenswrapper[4800]: I1125 16:48:24.957764 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsvqq" event={"ID":"24102a24-f1b0-4b69-b64c-b26e276abe61","Type":"ContainerDied","Data":"79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b"} Nov 25 16:48:24 crc kubenswrapper[4800]: I1125 16:48:24.976955 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsvqq" event={"ID":"24102a24-f1b0-4b69-b64c-b26e276abe61","Type":"ContainerStarted","Data":"0e7851426993f95931a9fc82b2e5a3bf01f258ee4784d42f031fa299783aa9e7"} Nov 25 16:48:24 crc kubenswrapper[4800]: I1125 16:48:24.978592 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:48:25 crc kubenswrapper[4800]: I1125 16:48:25.970688 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsvqq" event={"ID":"24102a24-f1b0-4b69-b64c-b26e276abe61","Type":"ContainerStarted","Data":"2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4"} Nov 25 16:48:27 crc kubenswrapper[4800]: I1125 16:48:27.988860 4800 generic.go:334] "Generic (PLEG): container finished" podID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerID="2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4" exitCode=0 Nov 25 16:48:27 crc kubenswrapper[4800]: I1125 16:48:27.988956 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsvqq" event={"ID":"24102a24-f1b0-4b69-b64c-b26e276abe61","Type":"ContainerDied","Data":"2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4"} Nov 25 16:48:29 crc kubenswrapper[4800]: I1125 16:48:28.999927 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsvqq" 
event={"ID":"24102a24-f1b0-4b69-b64c-b26e276abe61","Type":"ContainerStarted","Data":"3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3"} Nov 25 16:48:29 crc kubenswrapper[4800]: I1125 16:48:29.029430 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vsvqq" podStartSLOduration=2.625150313 podStartE2EDuration="6.029409458s" podCreationTimestamp="2025-11-25 16:48:23 +0000 UTC" firstStartedPulling="2025-11-25 16:48:24.978202701 +0000 UTC m=+5466.032611183" lastFinishedPulling="2025-11-25 16:48:28.382461846 +0000 UTC m=+5469.436870328" observedRunningTime="2025-11-25 16:48:29.019903259 +0000 UTC m=+5470.074311761" watchObservedRunningTime="2025-11-25 16:48:29.029409458 +0000 UTC m=+5470.083817940" Nov 25 16:48:33 crc kubenswrapper[4800]: I1125 16:48:33.522777 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:33 crc kubenswrapper[4800]: I1125 16:48:33.523130 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:33 crc kubenswrapper[4800]: I1125 16:48:33.572466 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:34 crc kubenswrapper[4800]: I1125 16:48:34.091639 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:34 crc kubenswrapper[4800]: I1125 16:48:34.149408 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vsvqq"] Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.064391 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vsvqq" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerName="registry-server" containerID="cri-o://3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3" gracePeriod=2 Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.714545 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.737325 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-catalog-content\") pod \"24102a24-f1b0-4b69-b64c-b26e276abe61\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.737409 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-utilities\") pod \"24102a24-f1b0-4b69-b64c-b26e276abe61\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.737531 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kghf6\" (UniqueName: \"kubernetes.io/projected/24102a24-f1b0-4b69-b64c-b26e276abe61-kube-api-access-kghf6\") pod \"24102a24-f1b0-4b69-b64c-b26e276abe61\" (UID: \"24102a24-f1b0-4b69-b64c-b26e276abe61\") " Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.740888 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-utilities" (OuterVolumeSpecName: "utilities") pod "24102a24-f1b0-4b69-b64c-b26e276abe61" (UID: "24102a24-f1b0-4b69-b64c-b26e276abe61"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.757891 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24102a24-f1b0-4b69-b64c-b26e276abe61-kube-api-access-kghf6" (OuterVolumeSpecName: "kube-api-access-kghf6") pod "24102a24-f1b0-4b69-b64c-b26e276abe61" (UID: "24102a24-f1b0-4b69-b64c-b26e276abe61"). InnerVolumeSpecName "kube-api-access-kghf6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.785564 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:48:36 crc kubenswrapper[4800]: E1125 16:48:36.789443 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.841097 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:48:36 crc kubenswrapper[4800]: I1125 16:48:36.841136 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kghf6\" (UniqueName: \"kubernetes.io/projected/24102a24-f1b0-4b69-b64c-b26e276abe61-kube-api-access-kghf6\") on node \"crc\" DevicePath \"\"" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.078155 4800 generic.go:334] "Generic (PLEG): container finished" podID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerID="3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3" exitCode=0 Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.078203 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsvqq" event={"ID":"24102a24-f1b0-4b69-b64c-b26e276abe61","Type":"ContainerDied","Data":"3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3"} Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.078251 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsvqq" event={"ID":"24102a24-f1b0-4b69-b64c-b26e276abe61","Type":"ContainerDied","Data":"0e7851426993f95931a9fc82b2e5a3bf01f258ee4784d42f031fa299783aa9e7"} Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.078258 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vsvqq" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.078273 4800 scope.go:117] "RemoveContainer" containerID="3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.106600 4800 scope.go:117] "RemoveContainer" containerID="2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.107489 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24102a24-f1b0-4b69-b64c-b26e276abe61" (UID: "24102a24-f1b0-4b69-b64c-b26e276abe61"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.139277 4800 scope.go:117] "RemoveContainer" containerID="79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.148494 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24102a24-f1b0-4b69-b64c-b26e276abe61-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.198594 4800 scope.go:117] "RemoveContainer" containerID="3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3" Nov 25 16:48:37 crc kubenswrapper[4800]: E1125 16:48:37.199147 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3\": container with ID starting with 3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3 not found: ID does not exist" containerID="3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.199198 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3"} err="failed to get container status \"3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3\": rpc error: code = NotFound desc = could not find container \"3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3\": container with ID starting with 3a4c9eaf5e8a890320cadf3f191038b6f842668753e22f0d760f177955a712c3 not found: ID does not exist" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.199227 4800 scope.go:117] "RemoveContainer" containerID="2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4" Nov 25 16:48:37 crc kubenswrapper[4800]: E1125 16:48:37.199674 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4\": container with ID starting with 2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4 not found: ID does not exist" containerID="2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.199716 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4"} err="failed to get container status \"2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4\": rpc error: code = NotFound desc = could not find container \"2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4\": container with ID starting with 2dff7b545c6fb111ec5b958b62b89f994a9aae40dcfd221ea8bfff17a1939fb4 not found: ID does not exist" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.199739 4800 scope.go:117] "RemoveContainer" containerID="79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b" Nov 25 16:48:37 crc kubenswrapper[4800]: E1125 16:48:37.200470 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b\": container with ID starting with 79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b not found: ID does not exist" 
containerID="79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.200498 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b"} err="failed to get container status \"79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b\": rpc error: code = NotFound desc = could not find container \"79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b\": container with ID starting with 79749943f78b25756b322fdad749fe8ae24a1eb2fc1c21be38ce2713fa2ff44b not found: ID does not exist" Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.414506 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vsvqq"] Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.423479 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vsvqq"] Nov 25 16:48:37 crc kubenswrapper[4800]: I1125 16:48:37.796262 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" path="/var/lib/kubelet/pods/24102a24-f1b0-4b69-b64c-b26e276abe61/volumes" Nov 25 16:48:51 crc kubenswrapper[4800]: I1125 16:48:51.785065 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:48:51 crc kubenswrapper[4800]: E1125 16:48:51.785792 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:49:03 crc kubenswrapper[4800]: I1125 16:49:03.785791 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:49:03 crc kubenswrapper[4800]: E1125 16:49:03.786529 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.815974 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lz7qs"] Nov 25 16:49:11 crc kubenswrapper[4800]: E1125 16:49:11.817027 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerName="extract-utilities" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.817045 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerName="extract-utilities" Nov 25 16:49:11 crc kubenswrapper[4800]: E1125 16:49:11.817064 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerName="extract-content" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.817072 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" 
containerName="extract-content" Nov 25 16:49:11 crc kubenswrapper[4800]: E1125 16:49:11.817095 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerName="registry-server" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.817103 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerName="registry-server" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.817338 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="24102a24-f1b0-4b69-b64c-b26e276abe61" containerName="registry-server" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.819052 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.829829 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lz7qs"] Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.832942 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-catalog-content\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.833170 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-utilities\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.833391 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7hhh\" (UniqueName: \"kubernetes.io/projected/c1ff6571-8945-41b2-b4e3-1df144d02782-kube-api-access-p7hhh\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.942592 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-catalog-content\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.943574 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-catalog-content\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.943236 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-utilities\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.943952 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-utilities\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:11 crc kubenswrapper[4800]: I1125 16:49:11.943967 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7hhh\" (UniqueName: \"kubernetes.io/projected/c1ff6571-8945-41b2-b4e3-1df144d02782-kube-api-access-p7hhh\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:12 crc kubenswrapper[4800]: I1125 16:49:12.378393 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7hhh\" (UniqueName: \"kubernetes.io/projected/c1ff6571-8945-41b2-b4e3-1df144d02782-kube-api-access-p7hhh\") pod \"redhat-marketplace-lz7qs\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:12 crc kubenswrapper[4800]: I1125 16:49:12.450670 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:12 crc kubenswrapper[4800]: I1125 16:49:12.923967 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lz7qs"] Nov 25 16:49:13 crc kubenswrapper[4800]: I1125 16:49:13.408140 4800 generic.go:334] "Generic (PLEG): container finished" podID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerID="1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0" exitCode=0 Nov 25 16:49:13 crc kubenswrapper[4800]: I1125 16:49:13.408211 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lz7qs" event={"ID":"c1ff6571-8945-41b2-b4e3-1df144d02782","Type":"ContainerDied","Data":"1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0"} Nov 25 16:49:13 crc kubenswrapper[4800]: I1125 16:49:13.408260 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lz7qs" event={"ID":"c1ff6571-8945-41b2-b4e3-1df144d02782","Type":"ContainerStarted","Data":"352e2c2d127f902f750d671aea902ba63d6aeceaf705e4048dfd2170869881b8"} Nov 25 16:49:15 crc kubenswrapper[4800]: I1125 16:49:15.426116 4800 generic.go:334] "Generic (PLEG): container finished" podID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerID="b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c" exitCode=0 Nov 25 16:49:15 crc kubenswrapper[4800]: I1125 16:49:15.426208 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lz7qs" event={"ID":"c1ff6571-8945-41b2-b4e3-1df144d02782","Type":"ContainerDied","Data":"b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c"} Nov 25 16:49:16 crc kubenswrapper[4800]: I1125 16:49:16.438550 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lz7qs" event={"ID":"c1ff6571-8945-41b2-b4e3-1df144d02782","Type":"ContainerStarted","Data":"4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed"} Nov 25 16:49:16 crc kubenswrapper[4800]: I1125 16:49:16.456491 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lz7qs" podStartSLOduration=2.982637557 podStartE2EDuration="5.456471372s" podCreationTimestamp="2025-11-25 16:49:11 +0000 UTC" firstStartedPulling="2025-11-25 16:49:13.410625247 
+0000 UTC m=+5514.465033729" lastFinishedPulling="2025-11-25 16:49:15.884459022 +0000 UTC m=+5516.938867544" observedRunningTime="2025-11-25 16:49:16.454253022 +0000 UTC m=+5517.508661504" watchObservedRunningTime="2025-11-25 16:49:16.456471372 +0000 UTC m=+5517.510879854" Nov 25 16:49:18 crc kubenswrapper[4800]: I1125 16:49:18.786736 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:49:18 crc kubenswrapper[4800]: E1125 16:49:18.787556 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:49:22 crc kubenswrapper[4800]: I1125 16:49:22.450958 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:22 crc kubenswrapper[4800]: I1125 16:49:22.451480 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:22 crc kubenswrapper[4800]: I1125 16:49:22.502186 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:22 crc kubenswrapper[4800]: I1125 16:49:22.561408 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:22 crc kubenswrapper[4800]: I1125 16:49:22.745456 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lz7qs"] Nov 25 16:49:24 crc kubenswrapper[4800]: I1125 16:49:24.514098 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lz7qs" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="registry-server" containerID="cri-o://4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed" gracePeriod=2 Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.471784 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.530315 4800 generic.go:334] "Generic (PLEG): container finished" podID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerID="4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed" exitCode=0 Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.530671 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lz7qs" event={"ID":"c1ff6571-8945-41b2-b4e3-1df144d02782","Type":"ContainerDied","Data":"4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed"} Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.530721 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lz7qs" event={"ID":"c1ff6571-8945-41b2-b4e3-1df144d02782","Type":"ContainerDied","Data":"352e2c2d127f902f750d671aea902ba63d6aeceaf705e4048dfd2170869881b8"} Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.530744 4800 scope.go:117] "RemoveContainer" containerID="4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.531021 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lz7qs" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.556926 4800 scope.go:117] "RemoveContainer" containerID="b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.578926 4800 scope.go:117] "RemoveContainer" containerID="1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.605869 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-catalog-content\") pod \"c1ff6571-8945-41b2-b4e3-1df144d02782\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.606081 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7hhh\" (UniqueName: \"kubernetes.io/projected/c1ff6571-8945-41b2-b4e3-1df144d02782-kube-api-access-p7hhh\") pod \"c1ff6571-8945-41b2-b4e3-1df144d02782\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.606127 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-utilities\") pod \"c1ff6571-8945-41b2-b4e3-1df144d02782\" (UID: \"c1ff6571-8945-41b2-b4e3-1df144d02782\") " Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.607612 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-utilities" (OuterVolumeSpecName: "utilities") pod "c1ff6571-8945-41b2-b4e3-1df144d02782" (UID: "c1ff6571-8945-41b2-b4e3-1df144d02782"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.613536 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1ff6571-8945-41b2-b4e3-1df144d02782-kube-api-access-p7hhh" (OuterVolumeSpecName: "kube-api-access-p7hhh") pod "c1ff6571-8945-41b2-b4e3-1df144d02782" (UID: "c1ff6571-8945-41b2-b4e3-1df144d02782"). InnerVolumeSpecName "kube-api-access-p7hhh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.627773 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c1ff6571-8945-41b2-b4e3-1df144d02782" (UID: "c1ff6571-8945-41b2-b4e3-1df144d02782"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.630667 4800 scope.go:117] "RemoveContainer" containerID="4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed" Nov 25 16:49:25 crc kubenswrapper[4800]: E1125 16:49:25.631105 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed\": container with ID starting with 4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed not found: ID does not exist" containerID="4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.631139 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed"} err="failed to get container status \"4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed\": rpc error: code = NotFound desc = could not find container \"4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed\": container with ID starting with 4986f7c8f351ef67e0c94becbdac3288dc37c213b76a8ffa784dc0400b034aed not found: ID does not exist" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.631159 4800 scope.go:117] "RemoveContainer" containerID="b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c" Nov 25 16:49:25 crc kubenswrapper[4800]: E1125 16:49:25.631371 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c\": container with ID starting with b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c not found: ID does not exist" containerID="b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.631432 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c"} err="failed to get container status \"b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c\": rpc error: code = NotFound desc = could not find container \"b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c\": container with ID starting with b63bfd1909319f73a6702b763650f1713983f4c1c664dce19084aad16fa7587c not found: ID does not exist" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.631453 4800 scope.go:117] "RemoveContainer" 
containerID="1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0" Nov 25 16:49:25 crc kubenswrapper[4800]: E1125 16:49:25.631664 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0\": container with ID starting with 1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0 not found: ID does not exist" containerID="1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.631687 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0"} err="failed to get container status \"1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0\": rpc error: code = NotFound desc = could not find container \"1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0\": container with ID starting with 1a3a13a0b6630d5c8ce72be79ac0938b2eb0f15658eeef9e924814267dde6fd0 not found: ID does not exist" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.709088 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7hhh\" (UniqueName: \"kubernetes.io/projected/c1ff6571-8945-41b2-b4e3-1df144d02782-kube-api-access-p7hhh\") on node \"crc\" DevicePath \"\"" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.709121 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.709132 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1ff6571-8945-41b2-b4e3-1df144d02782-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.850562 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lz7qs"] Nov 25 16:49:25 crc kubenswrapper[4800]: I1125 16:49:25.860804 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lz7qs"] Nov 25 16:49:27 crc kubenswrapper[4800]: I1125 16:49:27.796614 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" path="/var/lib/kubelet/pods/c1ff6571-8945-41b2-b4e3-1df144d02782/volumes" Nov 25 16:49:29 crc kubenswrapper[4800]: I1125 16:49:29.791717 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:49:29 crc kubenswrapper[4800]: E1125 16:49:29.792325 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:49:41 crc kubenswrapper[4800]: I1125 16:49:41.785253 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:49:41 crc kubenswrapper[4800]: E1125 16:49:41.786111 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:49:54 crc kubenswrapper[4800]: I1125 16:49:54.785228 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:49:55 crc kubenswrapper[4800]: I1125 16:49:55.865887 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"5590c6db652dfe7f3257f0876f6ffc4b23529a1f4f3ebfd0a79ee46777b669af"} Nov 25 16:51:59 crc kubenswrapper[4800]: I1125 16:51:59.888580 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7bvfj"] Nov 25 16:51:59 crc kubenswrapper[4800]: E1125 16:51:59.889621 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="extract-utilities" Nov 25 16:51:59 crc kubenswrapper[4800]: I1125 16:51:59.889639 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="extract-utilities" Nov 25 16:51:59 crc kubenswrapper[4800]: E1125 16:51:59.889659 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="extract-content" Nov 25 16:51:59 crc kubenswrapper[4800]: I1125 16:51:59.889667 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="extract-content" Nov 25 16:51:59 crc kubenswrapper[4800]: E1125 16:51:59.889691 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="registry-server" Nov 25 16:51:59 crc kubenswrapper[4800]: I1125 16:51:59.889699 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="registry-server" Nov 25 16:51:59 crc kubenswrapper[4800]: I1125 16:51:59.889968 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1ff6571-8945-41b2-b4e3-1df144d02782" containerName="registry-server" Nov 25 16:51:59 crc kubenswrapper[4800]: I1125 16:51:59.891512 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:51:59 crc kubenswrapper[4800]: I1125 16:51:59.907032 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7bvfj"] Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.030934 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-utilities\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.030986 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69frh\" (UniqueName: \"kubernetes.io/projected/919924b0-a701-429d-98d2-c812d136b74b-kube-api-access-69frh\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.031012 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-catalog-content\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.132772 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-utilities\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.133082 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69frh\" (UniqueName: \"kubernetes.io/projected/919924b0-a701-429d-98d2-c812d136b74b-kube-api-access-69frh\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.133105 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-catalog-content\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.133316 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-utilities\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.133510 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-catalog-content\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.154352 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-69frh\" (UniqueName: \"kubernetes.io/projected/919924b0-a701-429d-98d2-c812d136b74b-kube-api-access-69frh\") pod \"redhat-operators-7bvfj\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.217073 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.793760 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7bvfj"] Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.983125 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerStarted","Data":"6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d"} Nov 25 16:52:00 crc kubenswrapper[4800]: I1125 16:52:00.983183 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerStarted","Data":"35186c944067a2fafc482de75b45e98baefa106ab1c5e5bd56c9755938ba0a0a"} Nov 25 16:52:01 crc kubenswrapper[4800]: I1125 16:52:01.994019 4800 generic.go:334] "Generic (PLEG): container finished" podID="919924b0-a701-429d-98d2-c812d136b74b" containerID="6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d" exitCode=0 Nov 25 16:52:01 crc kubenswrapper[4800]: I1125 16:52:01.994074 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerDied","Data":"6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d"} Nov 25 16:52:03 crc kubenswrapper[4800]: I1125 16:52:03.011175 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerStarted","Data":"211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46"} Nov 25 16:52:06 crc kubenswrapper[4800]: I1125 16:52:06.046109 4800 generic.go:334] "Generic (PLEG): container finished" podID="919924b0-a701-429d-98d2-c812d136b74b" containerID="211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46" exitCode=0 Nov 25 16:52:06 crc kubenswrapper[4800]: I1125 16:52:06.046936 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerDied","Data":"211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46"} Nov 25 16:52:07 crc kubenswrapper[4800]: I1125 16:52:07.060557 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerStarted","Data":"03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70"} Nov 25 16:52:07 crc kubenswrapper[4800]: I1125 16:52:07.083024 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7bvfj" podStartSLOduration=3.631475363 podStartE2EDuration="8.083006084s" podCreationTimestamp="2025-11-25 16:51:59 +0000 UTC" firstStartedPulling="2025-11-25 16:52:01.996807411 +0000 UTC m=+5683.051215903" lastFinishedPulling="2025-11-25 16:52:06.448338142 +0000 UTC m=+5687.502746624" observedRunningTime="2025-11-25 
16:52:07.07883002 +0000 UTC m=+5688.133238502" watchObservedRunningTime="2025-11-25 16:52:07.083006084 +0000 UTC m=+5688.137414566" Nov 25 16:52:10 crc kubenswrapper[4800]: I1125 16:52:10.217619 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:10 crc kubenswrapper[4800]: I1125 16:52:10.217723 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:11 crc kubenswrapper[4800]: I1125 16:52:11.299301 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7bvfj" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="registry-server" probeResult="failure" output=< Nov 25 16:52:11 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 16:52:11 crc kubenswrapper[4800]: > Nov 25 16:52:12 crc kubenswrapper[4800]: I1125 16:52:12.640021 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:52:12 crc kubenswrapper[4800]: I1125 16:52:12.640419 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:52:20 crc kubenswrapper[4800]: I1125 16:52:20.269518 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:20 crc kubenswrapper[4800]: I1125 16:52:20.329433 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:20 crc kubenswrapper[4800]: I1125 16:52:20.509518 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7bvfj"] Nov 25 16:52:22 crc kubenswrapper[4800]: I1125 16:52:22.201872 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7bvfj" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="registry-server" containerID="cri-o://03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70" gracePeriod=2 Nov 25 16:52:22 crc kubenswrapper[4800]: I1125 16:52:22.884765 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:22 crc kubenswrapper[4800]: I1125 16:52:22.997183 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-utilities\") pod \"919924b0-a701-429d-98d2-c812d136b74b\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " Nov 25 16:52:22 crc kubenswrapper[4800]: I1125 16:52:22.997460 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-catalog-content\") pod \"919924b0-a701-429d-98d2-c812d136b74b\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " Nov 25 16:52:22 crc kubenswrapper[4800]: I1125 16:52:22.997540 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69frh\" (UniqueName: \"kubernetes.io/projected/919924b0-a701-429d-98d2-c812d136b74b-kube-api-access-69frh\") pod \"919924b0-a701-429d-98d2-c812d136b74b\" (UID: \"919924b0-a701-429d-98d2-c812d136b74b\") " Nov 25 16:52:22 crc kubenswrapper[4800]: I1125 16:52:22.998937 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-utilities" (OuterVolumeSpecName: "utilities") pod "919924b0-a701-429d-98d2-c812d136b74b" (UID: "919924b0-a701-429d-98d2-c812d136b74b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.007030 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/919924b0-a701-429d-98d2-c812d136b74b-kube-api-access-69frh" (OuterVolumeSpecName: "kube-api-access-69frh") pod "919924b0-a701-429d-98d2-c812d136b74b" (UID: "919924b0-a701-429d-98d2-c812d136b74b"). InnerVolumeSpecName "kube-api-access-69frh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.100132 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69frh\" (UniqueName: \"kubernetes.io/projected/919924b0-a701-429d-98d2-c812d136b74b-kube-api-access-69frh\") on node \"crc\" DevicePath \"\"" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.100522 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.105657 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "919924b0-a701-429d-98d2-c812d136b74b" (UID: "919924b0-a701-429d-98d2-c812d136b74b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.203097 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/919924b0-a701-429d-98d2-c812d136b74b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.215174 4800 generic.go:334] "Generic (PLEG): container finished" podID="919924b0-a701-429d-98d2-c812d136b74b" containerID="03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70" exitCode=0 Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.215223 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerDied","Data":"03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70"} Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.215264 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bvfj" event={"ID":"919924b0-a701-429d-98d2-c812d136b74b","Type":"ContainerDied","Data":"35186c944067a2fafc482de75b45e98baefa106ab1c5e5bd56c9755938ba0a0a"} Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.215261 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7bvfj" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.215311 4800 scope.go:117] "RemoveContainer" containerID="03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.271731 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7bvfj"] Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.271927 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7bvfj"] Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.281087 4800 scope.go:117] "RemoveContainer" containerID="211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.305931 4800 scope.go:117] "RemoveContainer" containerID="6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.355548 4800 scope.go:117] "RemoveContainer" containerID="03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70" Nov 25 16:52:23 crc kubenswrapper[4800]: E1125 16:52:23.356229 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70\": container with ID starting with 03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70 not found: ID does not exist" containerID="03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70" Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.356287 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70"} err="failed to get container status \"03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70\": rpc error: code = NotFound desc = could not find container \"03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70\": container with ID starting with 03770f49b8008f6b967b68cd58c5e03a5508545a2868725fe5f8bd372bd8ce70 not found: ID does not exist" Nov 25 16:52:23 crc 
Nov 25 16:52:23 crc kubenswrapper[4800]: E1125 16:52:23.356742 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46\": container with ID starting with 211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46 not found: ID does not exist" containerID="211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46"
Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.356771 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46"} err="failed to get container status \"211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46\": rpc error: code = NotFound desc = could not find container \"211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46\": container with ID starting with 211c667bd54c1540e90c13f59ab5cb9a180959bb2ff791066fd0e6d5991fbf46 not found: ID does not exist"
Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.356787 4800 scope.go:117] "RemoveContainer" containerID="6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d"
Nov 25 16:52:23 crc kubenswrapper[4800]: E1125 16:52:23.357273 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d\": container with ID starting with 6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d not found: ID does not exist" containerID="6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d"
Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.357323 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d"} err="failed to get container status \"6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d\": rpc error: code = NotFound desc = could not find container \"6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d\": container with ID starting with 6b497a5b99932662856d93d8e41695be165881ac0ee2189a73797a1f50bbba9d not found: ID does not exist"
Nov 25 16:52:23 crc kubenswrapper[4800]: I1125 16:52:23.799148 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="919924b0-a701-429d-98d2-c812d136b74b" path="/var/lib/kubelet/pods/919924b0-a701-429d-98d2-c812d136b74b/volumes"
Nov 25 16:52:42 crc kubenswrapper[4800]: I1125 16:52:42.640762 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:52:42 crc kubenswrapper[4800]: I1125 16:52:42.641497 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.440332 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ccsth"]
Nov 25 16:52:45 crc kubenswrapper[4800]: E1125 16:52:45.441509 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="extract-utilities"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.441525 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="extract-utilities"
Nov 25 16:52:45 crc kubenswrapper[4800]: E1125 16:52:45.441552 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="registry-server"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.441560 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="registry-server"
Nov 25 16:52:45 crc kubenswrapper[4800]: E1125 16:52:45.441577 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="extract-content"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.441585 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="extract-content"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.442119 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="919924b0-a701-429d-98d2-c812d136b74b" containerName="registry-server"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.443779 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.460371 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ccsth"]
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.582658 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch5w8\" (UniqueName: \"kubernetes.io/projected/dd34807f-4d4c-4878-8788-022e8555a839-kube-api-access-ch5w8\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.583213 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-utilities\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.583313 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-catalog-content\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.685695 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch5w8\" (UniqueName: \"kubernetes.io/projected/dd34807f-4d4c-4878-8788-022e8555a839-kube-api-access-ch5w8\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.685893 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-utilities\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.685937 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-catalog-content\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.686586 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-catalog-content\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.687247 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-utilities\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.714121 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch5w8\" (UniqueName: \"kubernetes.io/projected/dd34807f-4d4c-4878-8788-022e8555a839-kube-api-access-ch5w8\") pod \"community-operators-ccsth\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:45 crc kubenswrapper[4800]: I1125 16:52:45.767447 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:46 crc kubenswrapper[4800]: I1125 16:52:46.376073 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ccsth"]
Nov 25 16:52:46 crc kubenswrapper[4800]: I1125 16:52:46.459953 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ccsth" event={"ID":"dd34807f-4d4c-4878-8788-022e8555a839","Type":"ContainerStarted","Data":"e132e6db6978c4f0d1cc00bbb751beda82ac42e06e0a79ed03cb81306a84e5be"}
Nov 25 16:52:47 crc kubenswrapper[4800]: I1125 16:52:47.469449 4800 generic.go:334] "Generic (PLEG): container finished" podID="dd34807f-4d4c-4878-8788-022e8555a839" containerID="f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1" exitCode=0
Nov 25 16:52:47 crc kubenswrapper[4800]: I1125 16:52:47.469564 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ccsth" event={"ID":"dd34807f-4d4c-4878-8788-022e8555a839","Type":"ContainerDied","Data":"f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1"}
Nov 25 16:52:48 crc kubenswrapper[4800]: I1125 16:52:48.482070 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ccsth" event={"ID":"dd34807f-4d4c-4878-8788-022e8555a839","Type":"ContainerStarted","Data":"dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6"}
Nov 25 16:52:49 crc kubenswrapper[4800]: I1125 16:52:49.497338 4800 generic.go:334] "Generic (PLEG): container finished" podID="dd34807f-4d4c-4878-8788-022e8555a839" containerID="dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6" exitCode=0
Nov 25 16:52:49 crc kubenswrapper[4800]: I1125 16:52:49.497414 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ccsth" event={"ID":"dd34807f-4d4c-4878-8788-022e8555a839","Type":"ContainerDied","Data":"dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6"}
Nov 25 16:52:50 crc kubenswrapper[4800]: I1125 16:52:50.520259 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ccsth" event={"ID":"dd34807f-4d4c-4878-8788-022e8555a839","Type":"ContainerStarted","Data":"2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2"}
Nov 25 16:52:50 crc kubenswrapper[4800]: I1125 16:52:50.566427 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ccsth" podStartSLOduration=3.131512335 podStartE2EDuration="5.56640552s" podCreationTimestamp="2025-11-25 16:52:45 +0000 UTC" firstStartedPulling="2025-11-25 16:52:47.471685355 +0000 UTC m=+5728.526093837" lastFinishedPulling="2025-11-25 16:52:49.90657854 +0000 UTC m=+5730.960987022" observedRunningTime="2025-11-25 16:52:50.547078333 +0000 UTC m=+5731.601486815" watchObservedRunningTime="2025-11-25 16:52:50.56640552 +0000 UTC m=+5731.620814022"
Nov 25 16:52:55 crc kubenswrapper[4800]: I1125 16:52:55.767810 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:55 crc kubenswrapper[4800]: I1125 16:52:55.770381 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ccsth"
Nov 25 16:52:55 crc kubenswrapper[4800]: I1125 16:52:55.820979 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ccsth"
pod="openshift-marketplace/community-operators-ccsth" Nov 25 16:52:56 crc kubenswrapper[4800]: I1125 16:52:56.638137 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ccsth" Nov 25 16:52:56 crc kubenswrapper[4800]: I1125 16:52:56.696327 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ccsth"] Nov 25 16:52:58 crc kubenswrapper[4800]: I1125 16:52:58.602590 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ccsth" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="registry-server" containerID="cri-o://2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2" gracePeriod=2 Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.250150 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ccsth" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.389182 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-utilities\") pod \"dd34807f-4d4c-4878-8788-022e8555a839\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.389230 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-catalog-content\") pod \"dd34807f-4d4c-4878-8788-022e8555a839\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.389433 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch5w8\" (UniqueName: \"kubernetes.io/projected/dd34807f-4d4c-4878-8788-022e8555a839-kube-api-access-ch5w8\") pod \"dd34807f-4d4c-4878-8788-022e8555a839\" (UID: \"dd34807f-4d4c-4878-8788-022e8555a839\") " Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.390210 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-utilities" (OuterVolumeSpecName: "utilities") pod "dd34807f-4d4c-4878-8788-022e8555a839" (UID: "dd34807f-4d4c-4878-8788-022e8555a839"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.408318 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd34807f-4d4c-4878-8788-022e8555a839-kube-api-access-ch5w8" (OuterVolumeSpecName: "kube-api-access-ch5w8") pod "dd34807f-4d4c-4878-8788-022e8555a839" (UID: "dd34807f-4d4c-4878-8788-022e8555a839"). InnerVolumeSpecName "kube-api-access-ch5w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.452144 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd34807f-4d4c-4878-8788-022e8555a839" (UID: "dd34807f-4d4c-4878-8788-022e8555a839"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.491959 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.491994 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd34807f-4d4c-4878-8788-022e8555a839-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.492007 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch5w8\" (UniqueName: \"kubernetes.io/projected/dd34807f-4d4c-4878-8788-022e8555a839-kube-api-access-ch5w8\") on node \"crc\" DevicePath \"\"" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.610148 4800 generic.go:334] "Generic (PLEG): container finished" podID="dd34807f-4d4c-4878-8788-022e8555a839" containerID="2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2" exitCode=0 Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.610195 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ccsth" event={"ID":"dd34807f-4d4c-4878-8788-022e8555a839","Type":"ContainerDied","Data":"2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2"} Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.610224 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ccsth" event={"ID":"dd34807f-4d4c-4878-8788-022e8555a839","Type":"ContainerDied","Data":"e132e6db6978c4f0d1cc00bbb751beda82ac42e06e0a79ed03cb81306a84e5be"} Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.610241 4800 scope.go:117] "RemoveContainer" containerID="2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.610375 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ccsth" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.637027 4800 scope.go:117] "RemoveContainer" containerID="dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.646659 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ccsth"] Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.668865 4800 scope.go:117] "RemoveContainer" containerID="f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.669633 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ccsth"] Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.708378 4800 scope.go:117] "RemoveContainer" containerID="2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2" Nov 25 16:52:59 crc kubenswrapper[4800]: E1125 16:52:59.708868 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2\": container with ID starting with 2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2 not found: ID does not exist" containerID="2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.708920 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2"} err="failed to get container status \"2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2\": rpc error: code = NotFound desc = could not find container \"2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2\": container with ID starting with 2aa38b9ba6df6d90c2e2d1df0678f0c877017a12e723053587f41da44b2fc3e2 not found: ID does not exist" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.708957 4800 scope.go:117] "RemoveContainer" containerID="dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6" Nov 25 16:52:59 crc kubenswrapper[4800]: E1125 16:52:59.709549 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6\": container with ID starting with dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6 not found: ID does not exist" containerID="dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.709580 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6"} err="failed to get container status \"dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6\": rpc error: code = NotFound desc = could not find container \"dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6\": container with ID starting with dead5c42e97b1a3ef326d65e4d6088e64c82cb4f84bb206e145db559f3bface6 not found: ID does not exist" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.709600 4800 scope.go:117] "RemoveContainer" containerID="f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1" Nov 25 16:52:59 crc kubenswrapper[4800]: E1125 16:52:59.709966 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1\": container with ID starting with f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1 not found: ID does not exist" containerID="f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.709990 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1"} err="failed to get container status \"f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1\": rpc error: code = NotFound desc = could not find container \"f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1\": container with ID starting with f7dceb5a03269db20e083dfc9e704e4115c8d64508c3b04c7c2e3fb12ec117f1 not found: ID does not exist" Nov 25 16:52:59 crc kubenswrapper[4800]: I1125 16:52:59.799266 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd34807f-4d4c-4878-8788-022e8555a839" path="/var/lib/kubelet/pods/dd34807f-4d4c-4878-8788-022e8555a839/volumes" Nov 25 16:53:12 crc kubenswrapper[4800]: I1125 16:53:12.639671 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:53:12 crc kubenswrapper[4800]: I1125 16:53:12.640219 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:53:12 crc kubenswrapper[4800]: I1125 16:53:12.640270 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:53:12 crc kubenswrapper[4800]: I1125 16:53:12.641182 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5590c6db652dfe7f3257f0876f6ffc4b23529a1f4f3ebfd0a79ee46777b669af"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:53:12 crc kubenswrapper[4800]: I1125 16:53:12.641256 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://5590c6db652dfe7f3257f0876f6ffc4b23529a1f4f3ebfd0a79ee46777b669af" gracePeriod=600 Nov 25 16:53:13 crc kubenswrapper[4800]: I1125 16:53:13.730717 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="5590c6db652dfe7f3257f0876f6ffc4b23529a1f4f3ebfd0a79ee46777b669af" exitCode=0 Nov 25 16:53:13 crc kubenswrapper[4800]: I1125 16:53:13.730781 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"5590c6db652dfe7f3257f0876f6ffc4b23529a1f4f3ebfd0a79ee46777b669af"} Nov 25 16:53:13 crc kubenswrapper[4800]: I1125 16:53:13.731217 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"} Nov 25 16:53:13 crc kubenswrapper[4800]: I1125 16:53:13.731240 4800 scope.go:117] "RemoveContainer" containerID="54e59727c5086e0796b837135a000d42d997346a5e6de0d3df7af5e9919ad60f" Nov 25 16:54:23 crc kubenswrapper[4800]: I1125 16:54:23.135122 4800 patch_prober.go:28] interesting pod/oauth-openshift-b7d5b84cf-pjw76 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.60:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 16:54:23 crc kubenswrapper[4800]: I1125 16:54:23.135649 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-b7d5b84cf-pjw76" podUID="0b701ddb-a112-4067-8cfd-59d18f57b301" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.60:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 16:54:23 crc kubenswrapper[4800]: I1125 16:54:23.861068 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" podUID="05323c34-8333-474b-9713-a1b20ea27b72" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.51:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 16:54:23 crc kubenswrapper[4800]: I1125 16:54:23.861067 4800 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-6998585d5-p7rt2" podUID="05323c34-8333-474b-9713-a1b20ea27b72" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.51:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 16:55:42 crc kubenswrapper[4800]: I1125 16:55:42.640191 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:55:42 crc kubenswrapper[4800]: I1125 16:55:42.640966 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:56:12 crc kubenswrapper[4800]: I1125 16:56:12.639919 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:56:12 crc kubenswrapper[4800]: I1125 16:56:12.640524 4800 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.640930 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.641420 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.641470 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.642243 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.642288 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" gracePeriod=600 Nov 25 16:56:42 crc kubenswrapper[4800]: E1125 16:56:42.778780 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.895186 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" exitCode=0 Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.895265 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"} Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.895908 4800 scope.go:117] "RemoveContainer" containerID="5590c6db652dfe7f3257f0876f6ffc4b23529a1f4f3ebfd0a79ee46777b669af" Nov 25 16:56:42 crc kubenswrapper[4800]: I1125 16:56:42.896572 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 16:56:42 crc kubenswrapper[4800]: E1125 
Nov 25 16:56:56 crc kubenswrapper[4800]: I1125 16:56:56.786349 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:56:56 crc kubenswrapper[4800]: E1125 16:56:56.787644 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:57:09 crc kubenswrapper[4800]: I1125 16:57:09.794168 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:57:09 crc kubenswrapper[4800]: E1125 16:57:09.796068 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:57:22 crc kubenswrapper[4800]: I1125 16:57:22.786123 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:57:22 crc kubenswrapper[4800]: E1125 16:57:22.787264 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:57:33 crc kubenswrapper[4800]: I1125 16:57:33.785315 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:57:33 crc kubenswrapper[4800]: E1125 16:57:33.786260 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:57:46 crc kubenswrapper[4800]: I1125 16:57:46.786475 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:57:46 crc kubenswrapper[4800]: E1125 16:57:46.787332 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:57:59 crc kubenswrapper[4800]: I1125 16:57:59.795266 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:57:59 crc kubenswrapper[4800]: E1125 16:57:59.796367 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:58:12 crc kubenswrapper[4800]: I1125 16:58:12.785062 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:58:12 crc kubenswrapper[4800]: E1125 16:58:12.785814 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:58:25 crc kubenswrapper[4800]: I1125 16:58:25.785752 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:58:25 crc kubenswrapper[4800]: E1125 16:58:25.786641 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:58:37 crc kubenswrapper[4800]: I1125 16:58:37.785536 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:58:37 crc kubenswrapper[4800]: E1125 16:58:37.786250 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 16:58:51 crc kubenswrapper[4800]: I1125 16:58:51.786430 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490"
Nov 25 16:58:51 crc kubenswrapper[4800]: E1125 16:58:51.787077 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:59:03 crc kubenswrapper[4800]: I1125 16:59:03.786152 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 16:59:03 crc kubenswrapper[4800]: E1125 16:59:03.789390 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:59:15 crc kubenswrapper[4800]: I1125 16:59:15.784998 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 16:59:15 crc kubenswrapper[4800]: E1125 16:59:15.785747 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.527432 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cxdhm"] Nov 25 16:59:16 crc kubenswrapper[4800]: E1125 16:59:16.528009 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="extract-content" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.528030 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="extract-content" Nov 25 16:59:16 crc kubenswrapper[4800]: E1125 16:59:16.528057 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="extract-utilities" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.528067 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="extract-utilities" Nov 25 16:59:16 crc kubenswrapper[4800]: E1125 16:59:16.528090 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="registry-server" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.528099 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="registry-server" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.528357 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd34807f-4d4c-4878-8788-022e8555a839" containerName="registry-server" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.530815 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.564598 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cxdhm"] Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.662674 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7pg6\" (UniqueName: \"kubernetes.io/projected/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-kube-api-access-h7pg6\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.662875 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-utilities\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.663360 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-catalog-content\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.764943 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-catalog-content\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.764991 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7pg6\" (UniqueName: \"kubernetes.io/projected/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-kube-api-access-h7pg6\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.765036 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-utilities\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.765433 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-utilities\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.765638 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-catalog-content\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.787813 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-h7pg6\" (UniqueName: \"kubernetes.io/projected/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-kube-api-access-h7pg6\") pod \"certified-operators-cxdhm\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:16 crc kubenswrapper[4800]: I1125 16:59:16.875657 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:17 crc kubenswrapper[4800]: I1125 16:59:17.415618 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cxdhm"] Nov 25 16:59:17 crc kubenswrapper[4800]: I1125 16:59:17.451191 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cxdhm" event={"ID":"2f32fafc-8fa3-4350-8634-71f9ecd3bd59","Type":"ContainerStarted","Data":"f84d5616a5c67261f61bdf858a998e5abfa610e83e5bebaa11e9936d6e066f8a"} Nov 25 16:59:18 crc kubenswrapper[4800]: I1125 16:59:18.462104 4800 generic.go:334] "Generic (PLEG): container finished" podID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerID="7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5" exitCode=0 Nov 25 16:59:18 crc kubenswrapper[4800]: I1125 16:59:18.462155 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cxdhm" event={"ID":"2f32fafc-8fa3-4350-8634-71f9ecd3bd59","Type":"ContainerDied","Data":"7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5"} Nov 25 16:59:18 crc kubenswrapper[4800]: I1125 16:59:18.465976 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:59:20 crc kubenswrapper[4800]: I1125 16:59:20.497627 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cxdhm" event={"ID":"2f32fafc-8fa3-4350-8634-71f9ecd3bd59","Type":"ContainerStarted","Data":"e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37"} Nov 25 16:59:21 crc kubenswrapper[4800]: I1125 16:59:21.508640 4800 generic.go:334] "Generic (PLEG): container finished" podID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerID="e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37" exitCode=0 Nov 25 16:59:21 crc kubenswrapper[4800]: I1125 16:59:21.508712 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cxdhm" event={"ID":"2f32fafc-8fa3-4350-8634-71f9ecd3bd59","Type":"ContainerDied","Data":"e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37"} Nov 25 16:59:22 crc kubenswrapper[4800]: I1125 16:59:22.518772 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cxdhm" event={"ID":"2f32fafc-8fa3-4350-8634-71f9ecd3bd59","Type":"ContainerStarted","Data":"e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b"} Nov 25 16:59:22 crc kubenswrapper[4800]: I1125 16:59:22.539649 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cxdhm" podStartSLOduration=2.922707322 podStartE2EDuration="6.539632501s" podCreationTimestamp="2025-11-25 16:59:16 +0000 UTC" firstStartedPulling="2025-11-25 16:59:18.465267264 +0000 UTC m=+6119.519675786" lastFinishedPulling="2025-11-25 16:59:22.082192483 +0000 UTC m=+6123.136600965" observedRunningTime="2025-11-25 16:59:22.538240743 +0000 UTC m=+6123.592649235" watchObservedRunningTime="2025-11-25 
16:59:22.539632501 +0000 UTC m=+6123.594040983" Nov 25 16:59:26 crc kubenswrapper[4800]: I1125 16:59:26.785514 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 16:59:26 crc kubenswrapper[4800]: E1125 16:59:26.786567 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:59:26 crc kubenswrapper[4800]: I1125 16:59:26.876467 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:26 crc kubenswrapper[4800]: I1125 16:59:26.876525 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:26 crc kubenswrapper[4800]: I1125 16:59:26.943781 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:27 crc kubenswrapper[4800]: I1125 16:59:27.615196 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:27 crc kubenswrapper[4800]: I1125 16:59:27.668762 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cxdhm"] Nov 25 16:59:29 crc kubenswrapper[4800]: I1125 16:59:29.581276 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cxdhm" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="registry-server" containerID="cri-o://e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b" gracePeriod=2 Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.241235 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.363568 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-utilities\") pod \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.363641 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-catalog-content\") pod \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.363726 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7pg6\" (UniqueName: \"kubernetes.io/projected/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-kube-api-access-h7pg6\") pod \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\" (UID: \"2f32fafc-8fa3-4350-8634-71f9ecd3bd59\") " Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.365506 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-utilities" (OuterVolumeSpecName: "utilities") pod "2f32fafc-8fa3-4350-8634-71f9ecd3bd59" (UID: "2f32fafc-8fa3-4350-8634-71f9ecd3bd59"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.371501 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-kube-api-access-h7pg6" (OuterVolumeSpecName: "kube-api-access-h7pg6") pod "2f32fafc-8fa3-4350-8634-71f9ecd3bd59" (UID: "2f32fafc-8fa3-4350-8634-71f9ecd3bd59"). InnerVolumeSpecName "kube-api-access-h7pg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.421331 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f32fafc-8fa3-4350-8634-71f9ecd3bd59" (UID: "2f32fafc-8fa3-4350-8634-71f9ecd3bd59"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.465563 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.465591 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.465625 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7pg6\" (UniqueName: \"kubernetes.io/projected/2f32fafc-8fa3-4350-8634-71f9ecd3bd59-kube-api-access-h7pg6\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.595464 4800 generic.go:334] "Generic (PLEG): container finished" podID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerID="e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b" exitCode=0 Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.595533 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cxdhm" event={"ID":"2f32fafc-8fa3-4350-8634-71f9ecd3bd59","Type":"ContainerDied","Data":"e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b"} Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.595577 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cxdhm" event={"ID":"2f32fafc-8fa3-4350-8634-71f9ecd3bd59","Type":"ContainerDied","Data":"f84d5616a5c67261f61bdf858a998e5abfa610e83e5bebaa11e9936d6e066f8a"} Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.595604 4800 scope.go:117] "RemoveContainer" containerID="e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.595814 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cxdhm" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.635146 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cxdhm"] Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.635964 4800 scope.go:117] "RemoveContainer" containerID="e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.647902 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cxdhm"] Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.661009 4800 scope.go:117] "RemoveContainer" containerID="7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.718278 4800 scope.go:117] "RemoveContainer" containerID="e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b" Nov 25 16:59:30 crc kubenswrapper[4800]: E1125 16:59:30.720962 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b\": container with ID starting with e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b not found: ID does not exist" containerID="e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.721121 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b"} err="failed to get container status \"e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b\": rpc error: code = NotFound desc = could not find container \"e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b\": container with ID starting with e50bc2be35292d22ac93dafe7c6d5b6898ef1878d133837a48cc35e7a0ece75b not found: ID does not exist" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.721222 4800 scope.go:117] "RemoveContainer" containerID="e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37" Nov 25 16:59:30 crc kubenswrapper[4800]: E1125 16:59:30.721683 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37\": container with ID starting with e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37 not found: ID does not exist" containerID="e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.721783 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37"} err="failed to get container status \"e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37\": rpc error: code = NotFound desc = could not find container \"e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37\": container with ID starting with e92e9ca9ca7638a85927a183f014761cdd13bedf966712be53e302b4fdf58f37 not found: ID does not exist" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.721875 4800 scope.go:117] "RemoveContainer" containerID="7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5" Nov 25 16:59:30 crc kubenswrapper[4800]: E1125 16:59:30.722203 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5\": container with ID starting with 7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5 not found: ID does not exist" containerID="7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5" Nov 25 16:59:30 crc kubenswrapper[4800]: I1125 16:59:30.722318 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5"} err="failed to get container status \"7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5\": rpc error: code = NotFound desc = could not find container \"7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5\": container with ID starting with 7e720725ff90905604b368319107e2f2d57e2ec1c02231a8e169eca4eaede4d5 not found: ID does not exist" Nov 25 16:59:31 crc kubenswrapper[4800]: I1125 16:59:31.797433 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" path="/var/lib/kubelet/pods/2f32fafc-8fa3-4350-8634-71f9ecd3bd59/volumes" Nov 25 16:59:35 crc kubenswrapper[4800]: I1125 16:59:35.991091 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lznlr"] Nov 25 16:59:35 crc kubenswrapper[4800]: E1125 16:59:35.992105 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="registry-server" Nov 25 16:59:35 crc kubenswrapper[4800]: I1125 16:59:35.992121 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="registry-server" Nov 25 16:59:35 crc kubenswrapper[4800]: E1125 16:59:35.992140 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="extract-utilities" Nov 25 16:59:35 crc kubenswrapper[4800]: I1125 16:59:35.992148 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="extract-utilities" Nov 25 16:59:35 crc kubenswrapper[4800]: E1125 16:59:35.992176 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="extract-content" Nov 25 16:59:35 crc kubenswrapper[4800]: I1125 16:59:35.992186 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="extract-content" Nov 25 16:59:35 crc kubenswrapper[4800]: I1125 16:59:35.992462 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f32fafc-8fa3-4350-8634-71f9ecd3bd59" containerName="registry-server" Nov 25 16:59:35 crc kubenswrapper[4800]: I1125 16:59:35.995130 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.012273 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lznlr"] Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.071607 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6wpf\" (UniqueName: \"kubernetes.io/projected/fb611793-cd49-4a1f-9faa-1aa2d7233db6-kube-api-access-w6wpf\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.071676 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-utilities\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.071750 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-catalog-content\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.173228 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6wpf\" (UniqueName: \"kubernetes.io/projected/fb611793-cd49-4a1f-9faa-1aa2d7233db6-kube-api-access-w6wpf\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.173595 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-utilities\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.173696 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-catalog-content\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.174204 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-utilities\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.174207 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-catalog-content\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.200634 4800 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-w6wpf\" (UniqueName: \"kubernetes.io/projected/fb611793-cd49-4a1f-9faa-1aa2d7233db6-kube-api-access-w6wpf\") pod \"redhat-marketplace-lznlr\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.317559 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:36 crc kubenswrapper[4800]: I1125 16:59:36.853479 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lznlr"] Nov 25 16:59:37 crc kubenswrapper[4800]: I1125 16:59:37.667296 4800 generic.go:334] "Generic (PLEG): container finished" podID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerID="1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617" exitCode=0 Nov 25 16:59:37 crc kubenswrapper[4800]: I1125 16:59:37.667410 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lznlr" event={"ID":"fb611793-cd49-4a1f-9faa-1aa2d7233db6","Type":"ContainerDied","Data":"1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617"} Nov 25 16:59:37 crc kubenswrapper[4800]: I1125 16:59:37.667673 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lznlr" event={"ID":"fb611793-cd49-4a1f-9faa-1aa2d7233db6","Type":"ContainerStarted","Data":"3ca92460fbd8bf39e9054315feb24c67c076c7b2635451b4b361cb233c0e41da"} Nov 25 16:59:38 crc kubenswrapper[4800]: I1125 16:59:38.683790 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lznlr" event={"ID":"fb611793-cd49-4a1f-9faa-1aa2d7233db6","Type":"ContainerStarted","Data":"91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98"} Nov 25 16:59:38 crc kubenswrapper[4800]: I1125 16:59:38.786230 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 16:59:38 crc kubenswrapper[4800]: E1125 16:59:38.786676 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 16:59:39 crc kubenswrapper[4800]: I1125 16:59:39.699019 4800 generic.go:334] "Generic (PLEG): container finished" podID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerID="91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98" exitCode=0 Nov 25 16:59:39 crc kubenswrapper[4800]: I1125 16:59:39.699120 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lznlr" event={"ID":"fb611793-cd49-4a1f-9faa-1aa2d7233db6","Type":"ContainerDied","Data":"91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98"} Nov 25 16:59:40 crc kubenswrapper[4800]: I1125 16:59:40.709927 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lznlr" event={"ID":"fb611793-cd49-4a1f-9faa-1aa2d7233db6","Type":"ContainerStarted","Data":"c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea"} Nov 25 16:59:40 crc kubenswrapper[4800]: I1125 16:59:40.739221 4800 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-marketplace/redhat-marketplace-lznlr" podStartSLOduration=3.280512816 podStartE2EDuration="5.739191036s" podCreationTimestamp="2025-11-25 16:59:35 +0000 UTC" firstStartedPulling="2025-11-25 16:59:37.669729512 +0000 UTC m=+6138.724138034" lastFinishedPulling="2025-11-25 16:59:40.128407752 +0000 UTC m=+6141.182816254" observedRunningTime="2025-11-25 16:59:40.730804097 +0000 UTC m=+6141.785212599" watchObservedRunningTime="2025-11-25 16:59:40.739191036 +0000 UTC m=+6141.793599518" Nov 25 16:59:46 crc kubenswrapper[4800]: I1125 16:59:46.318464 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:46 crc kubenswrapper[4800]: I1125 16:59:46.319117 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:46 crc kubenswrapper[4800]: I1125 16:59:46.379303 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:46 crc kubenswrapper[4800]: I1125 16:59:46.839834 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:47 crc kubenswrapper[4800]: I1125 16:59:47.623993 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lznlr"] Nov 25 16:59:48 crc kubenswrapper[4800]: I1125 16:59:48.791218 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lznlr" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="registry-server" containerID="cri-o://c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea" gracePeriod=2 Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.429493 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.561565 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-catalog-content\") pod \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.561720 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6wpf\" (UniqueName: \"kubernetes.io/projected/fb611793-cd49-4a1f-9faa-1aa2d7233db6-kube-api-access-w6wpf\") pod \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.561954 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-utilities\") pod \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\" (UID: \"fb611793-cd49-4a1f-9faa-1aa2d7233db6\") " Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.563224 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-utilities" (OuterVolumeSpecName: "utilities") pod "fb611793-cd49-4a1f-9faa-1aa2d7233db6" (UID: "fb611793-cd49-4a1f-9faa-1aa2d7233db6"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.568080 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb611793-cd49-4a1f-9faa-1aa2d7233db6-kube-api-access-w6wpf" (OuterVolumeSpecName: "kube-api-access-w6wpf") pod "fb611793-cd49-4a1f-9faa-1aa2d7233db6" (UID: "fb611793-cd49-4a1f-9faa-1aa2d7233db6"). InnerVolumeSpecName "kube-api-access-w6wpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.579335 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb611793-cd49-4a1f-9faa-1aa2d7233db6" (UID: "fb611793-cd49-4a1f-9faa-1aa2d7233db6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.663982 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.664030 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb611793-cd49-4a1f-9faa-1aa2d7233db6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.664043 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6wpf\" (UniqueName: \"kubernetes.io/projected/fb611793-cd49-4a1f-9faa-1aa2d7233db6-kube-api-access-w6wpf\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.808742 4800 generic.go:334] "Generic (PLEG): container finished" podID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerID="c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea" exitCode=0 Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.808825 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lznlr" event={"ID":"fb611793-cd49-4a1f-9faa-1aa2d7233db6","Type":"ContainerDied","Data":"c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea"} Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.808909 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lznlr" event={"ID":"fb611793-cd49-4a1f-9faa-1aa2d7233db6","Type":"ContainerDied","Data":"3ca92460fbd8bf39e9054315feb24c67c076c7b2635451b4b361cb233c0e41da"} Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.808927 4800 scope.go:117] "RemoveContainer" containerID="c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.808930 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lznlr" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.838919 4800 scope.go:117] "RemoveContainer" containerID="91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.844730 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lznlr"] Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.858885 4800 scope.go:117] "RemoveContainer" containerID="1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.859906 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lznlr"] Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.902556 4800 scope.go:117] "RemoveContainer" containerID="c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea" Nov 25 16:59:49 crc kubenswrapper[4800]: E1125 16:59:49.903080 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea\": container with ID starting with c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea not found: ID does not exist" containerID="c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.903146 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea"} err="failed to get container status \"c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea\": rpc error: code = NotFound desc = could not find container \"c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea\": container with ID starting with c6fc76747e258ee95829aff5b5fa801c8ef25fa1ffee0b955997d8237acfc2ea not found: ID does not exist" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.903180 4800 scope.go:117] "RemoveContainer" containerID="91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98" Nov 25 16:59:49 crc kubenswrapper[4800]: E1125 16:59:49.903512 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98\": container with ID starting with 91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98 not found: ID does not exist" containerID="91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.903562 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98"} err="failed to get container status \"91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98\": rpc error: code = NotFound desc = could not find container \"91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98\": container with ID starting with 91f8e4025edd36fb45c6bf5125ec9147145873c238115d49417f54ca95814f98 not found: ID does not exist" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.903592 4800 scope.go:117] "RemoveContainer" containerID="1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617" Nov 25 16:59:49 crc kubenswrapper[4800]: E1125 16:59:49.903887 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617\": container with ID starting with 1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617 not found: ID does not exist" containerID="1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617" Nov 25 16:59:49 crc kubenswrapper[4800]: I1125 16:59:49.903912 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617"} err="failed to get container status \"1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617\": rpc error: code = NotFound desc = could not find container \"1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617\": container with ID starting with 1ec2a79012a3460796c8aa94dccbc3fbec2f7f9575bbf3c46ed18908c8aed617 not found: ID does not exist" Nov 25 16:59:51 crc kubenswrapper[4800]: I1125 16:59:51.804185 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" path="/var/lib/kubelet/pods/fb611793-cd49-4a1f-9faa-1aa2d7233db6/volumes" Nov 25 16:59:53 crc kubenswrapper[4800]: I1125 16:59:53.785072 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 16:59:53 crc kubenswrapper[4800]: E1125 16:59:53.785736 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.162884 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp"] Nov 25 17:00:00 crc kubenswrapper[4800]: E1125 17:00:00.163897 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="extract-content" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.163916 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="extract-content" Nov 25 17:00:00 crc kubenswrapper[4800]: E1125 17:00:00.163949 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="registry-server" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.163957 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="registry-server" Nov 25 17:00:00 crc kubenswrapper[4800]: E1125 17:00:00.163983 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="extract-utilities" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.163993 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="extract-utilities" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.164256 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb611793-cd49-4a1f-9faa-1aa2d7233db6" containerName="registry-server" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.165082 4800 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.167109 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.167453 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.228326 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp"] Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.306023 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/085da92b-709c-4295-bf75-3f70925c16a1-secret-volume\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.306122 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/085da92b-709c-4295-bf75-3f70925c16a1-config-volume\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.306151 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7vvg\" (UniqueName: \"kubernetes.io/projected/085da92b-709c-4295-bf75-3f70925c16a1-kube-api-access-c7vvg\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.408624 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/085da92b-709c-4295-bf75-3f70925c16a1-secret-volume\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.408752 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/085da92b-709c-4295-bf75-3f70925c16a1-config-volume\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.408786 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7vvg\" (UniqueName: \"kubernetes.io/projected/085da92b-709c-4295-bf75-3f70925c16a1-kube-api-access-c7vvg\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.410153 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/085da92b-709c-4295-bf75-3f70925c16a1-config-volume\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.421470 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/085da92b-709c-4295-bf75-3f70925c16a1-secret-volume\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.424319 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7vvg\" (UniqueName: \"kubernetes.io/projected/085da92b-709c-4295-bf75-3f70925c16a1-kube-api-access-c7vvg\") pod \"collect-profiles-29401500-q9vgp\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:00 crc kubenswrapper[4800]: I1125 17:00:00.536514 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:01 crc kubenswrapper[4800]: I1125 17:00:01.025221 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp"] Nov 25 17:00:01 crc kubenswrapper[4800]: W1125 17:00:01.037745 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod085da92b_709c_4295_bf75_3f70925c16a1.slice/crio-c7608f811738887ea022c5c7eef4b91fa19624f49e57003f1660b4f91cb37a3a WatchSource:0}: Error finding container c7608f811738887ea022c5c7eef4b91fa19624f49e57003f1660b4f91cb37a3a: Status 404 returned error can't find the container with id c7608f811738887ea022c5c7eef4b91fa19624f49e57003f1660b4f91cb37a3a Nov 25 17:00:01 crc kubenswrapper[4800]: I1125 17:00:01.948355 4800 generic.go:334] "Generic (PLEG): container finished" podID="085da92b-709c-4295-bf75-3f70925c16a1" containerID="6464fe4d48a54baf61038543dcb6d719b7770a027e0d27fb7656b1447a103ce6" exitCode=0 Nov 25 17:00:01 crc kubenswrapper[4800]: I1125 17:00:01.948474 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" event={"ID":"085da92b-709c-4295-bf75-3f70925c16a1","Type":"ContainerDied","Data":"6464fe4d48a54baf61038543dcb6d719b7770a027e0d27fb7656b1447a103ce6"} Nov 25 17:00:01 crc kubenswrapper[4800]: I1125 17:00:01.948904 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" event={"ID":"085da92b-709c-4295-bf75-3f70925c16a1","Type":"ContainerStarted","Data":"c7608f811738887ea022c5c7eef4b91fa19624f49e57003f1660b4f91cb37a3a"} Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.396551 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.584149 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7vvg\" (UniqueName: \"kubernetes.io/projected/085da92b-709c-4295-bf75-3f70925c16a1-kube-api-access-c7vvg\") pod \"085da92b-709c-4295-bf75-3f70925c16a1\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.584283 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/085da92b-709c-4295-bf75-3f70925c16a1-config-volume\") pod \"085da92b-709c-4295-bf75-3f70925c16a1\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.584528 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/085da92b-709c-4295-bf75-3f70925c16a1-secret-volume\") pod \"085da92b-709c-4295-bf75-3f70925c16a1\" (UID: \"085da92b-709c-4295-bf75-3f70925c16a1\") " Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.585322 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/085da92b-709c-4295-bf75-3f70925c16a1-config-volume" (OuterVolumeSpecName: "config-volume") pod "085da92b-709c-4295-bf75-3f70925c16a1" (UID: "085da92b-709c-4295-bf75-3f70925c16a1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.591120 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/085da92b-709c-4295-bf75-3f70925c16a1-kube-api-access-c7vvg" (OuterVolumeSpecName: "kube-api-access-c7vvg") pod "085da92b-709c-4295-bf75-3f70925c16a1" (UID: "085da92b-709c-4295-bf75-3f70925c16a1"). InnerVolumeSpecName "kube-api-access-c7vvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.592615 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/085da92b-709c-4295-bf75-3f70925c16a1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "085da92b-709c-4295-bf75-3f70925c16a1" (UID: "085da92b-709c-4295-bf75-3f70925c16a1"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.687764 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/085da92b-709c-4295-bf75-3f70925c16a1-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.687882 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/085da92b-709c-4295-bf75-3f70925c16a1-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.687908 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7vvg\" (UniqueName: \"kubernetes.io/projected/085da92b-709c-4295-bf75-3f70925c16a1-kube-api-access-c7vvg\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.977707 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" event={"ID":"085da92b-709c-4295-bf75-3f70925c16a1","Type":"ContainerDied","Data":"c7608f811738887ea022c5c7eef4b91fa19624f49e57003f1660b4f91cb37a3a"} Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.977776 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7608f811738887ea022c5c7eef4b91fa19624f49e57003f1660b4f91cb37a3a" Nov 25 17:00:03 crc kubenswrapper[4800]: I1125 17:00:03.977888 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp" Nov 25 17:00:04 crc kubenswrapper[4800]: I1125 17:00:04.490214 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk"] Nov 25 17:00:04 crc kubenswrapper[4800]: I1125 17:00:04.505067 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401455-7grwk"] Nov 25 17:00:05 crc kubenswrapper[4800]: I1125 17:00:05.805118 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d81d0a97-3e2c-40a6-ba50-9b7798e7c05b" path="/var/lib/kubelet/pods/d81d0a97-3e2c-40a6-ba50-9b7798e7c05b/volumes" Nov 25 17:00:07 crc kubenswrapper[4800]: I1125 17:00:07.785981 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:00:07 crc kubenswrapper[4800]: E1125 17:00:07.786662 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:00:22 crc kubenswrapper[4800]: I1125 17:00:22.785728 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:00:22 crc kubenswrapper[4800]: E1125 17:00:22.787110 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:00:32 crc kubenswrapper[4800]: I1125 17:00:32.655617 4800 scope.go:117] "RemoveContainer" containerID="53f53b0cdcf5e13fb81c3a43e9b3598c65cf827cdd95d6a6eb3e2a17c5b2a7ca" Nov 25 17:00:35 crc kubenswrapper[4800]: I1125 17:00:35.786129 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:00:35 crc kubenswrapper[4800]: E1125 17:00:35.786925 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:00:47 crc kubenswrapper[4800]: I1125 17:00:47.786494 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:00:47 crc kubenswrapper[4800]: E1125 17:00:47.788011 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:00:58 crc kubenswrapper[4800]: I1125 17:00:58.785670 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:00:58 crc kubenswrapper[4800]: E1125 17:00:58.786513 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.179489 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401501-ws7bm"] Nov 25 17:01:00 crc kubenswrapper[4800]: E1125 17:01:00.180163 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="085da92b-709c-4295-bf75-3f70925c16a1" containerName="collect-profiles" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.180176 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="085da92b-709c-4295-bf75-3f70925c16a1" containerName="collect-profiles" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.180423 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="085da92b-709c-4295-bf75-3f70925c16a1" containerName="collect-profiles" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.181075 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.265602 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-fernet-keys\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.265892 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfngm\" (UniqueName: \"kubernetes.io/projected/3e5c3bd3-4074-4f19-8810-2c93766f0f76-kube-api-access-nfngm\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.266003 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-combined-ca-bundle\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.266076 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-config-data\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.367976 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-config-data\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.368135 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-fernet-keys\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.368225 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfngm\" (UniqueName: \"kubernetes.io/projected/3e5c3bd3-4074-4f19-8810-2c93766f0f76-kube-api-access-nfngm\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.368275 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-combined-ca-bundle\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.374506 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-fernet-keys\") pod \"keystone-cron-29401501-ws7bm\" (UID: 
\"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.374826 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-config-data\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.374997 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-combined-ca-bundle\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.388712 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfngm\" (UniqueName: \"kubernetes.io/projected/3e5c3bd3-4074-4f19-8810-2c93766f0f76-kube-api-access-nfngm\") pod \"keystone-cron-29401501-ws7bm\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.447499 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401501-ws7bm"] Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.508996 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:00 crc kubenswrapper[4800]: I1125 17:01:00.972579 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401501-ws7bm"] Nov 25 17:01:01 crc kubenswrapper[4800]: I1125 17:01:01.511743 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401501-ws7bm" event={"ID":"3e5c3bd3-4074-4f19-8810-2c93766f0f76","Type":"ContainerStarted","Data":"dec8f6db018cd3189e1ab4a1814e93b0dc8d80f1a6c8d9b0f64f5463d5790be4"} Nov 25 17:01:01 crc kubenswrapper[4800]: I1125 17:01:01.512128 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401501-ws7bm" event={"ID":"3e5c3bd3-4074-4f19-8810-2c93766f0f76","Type":"ContainerStarted","Data":"440df12cb36898392a8ccf38f722760050e6e3d4a7cb75972d7f2bc8377835a5"} Nov 25 17:01:01 crc kubenswrapper[4800]: I1125 17:01:01.531926 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401501-ws7bm" podStartSLOduration=1.531909129 podStartE2EDuration="1.531909129s" podCreationTimestamp="2025-11-25 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:01:01.524921528 +0000 UTC m=+6222.579330010" watchObservedRunningTime="2025-11-25 17:01:01.531909129 +0000 UTC m=+6222.586317611" Nov 25 17:01:06 crc kubenswrapper[4800]: I1125 17:01:06.557288 4800 generic.go:334] "Generic (PLEG): container finished" podID="3e5c3bd3-4074-4f19-8810-2c93766f0f76" containerID="dec8f6db018cd3189e1ab4a1814e93b0dc8d80f1a6c8d9b0f64f5463d5790be4" exitCode=0 Nov 25 17:01:06 crc kubenswrapper[4800]: I1125 17:01:06.557378 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401501-ws7bm" event={"ID":"3e5c3bd3-4074-4f19-8810-2c93766f0f76","Type":"ContainerDied","Data":"dec8f6db018cd3189e1ab4a1814e93b0dc8d80f1a6c8d9b0f64f5463d5790be4"} Nov 25 17:01:08 crc 
kubenswrapper[4800]: I1125 17:01:08.075241 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.099232 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-fernet-keys\") pod \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.100281 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-config-data\") pod \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.100556 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-combined-ca-bundle\") pod \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.100595 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfngm\" (UniqueName: \"kubernetes.io/projected/3e5c3bd3-4074-4f19-8810-2c93766f0f76-kube-api-access-nfngm\") pod \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\" (UID: \"3e5c3bd3-4074-4f19-8810-2c93766f0f76\") " Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.106210 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e5c3bd3-4074-4f19-8810-2c93766f0f76-kube-api-access-nfngm" (OuterVolumeSpecName: "kube-api-access-nfngm") pod "3e5c3bd3-4074-4f19-8810-2c93766f0f76" (UID: "3e5c3bd3-4074-4f19-8810-2c93766f0f76"). InnerVolumeSpecName "kube-api-access-nfngm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.107070 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3e5c3bd3-4074-4f19-8810-2c93766f0f76" (UID: "3e5c3bd3-4074-4f19-8810-2c93766f0f76"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.142259 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e5c3bd3-4074-4f19-8810-2c93766f0f76" (UID: "3e5c3bd3-4074-4f19-8810-2c93766f0f76"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.149856 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-config-data" (OuterVolumeSpecName: "config-data") pod "3e5c3bd3-4074-4f19-8810-2c93766f0f76" (UID: "3e5c3bd3-4074-4f19-8810-2c93766f0f76"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.204101 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.204152 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfngm\" (UniqueName: \"kubernetes.io/projected/3e5c3bd3-4074-4f19-8810-2c93766f0f76-kube-api-access-nfngm\") on node \"crc\" DevicePath \"\"" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.204218 4800 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.204246 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5c3bd3-4074-4f19-8810-2c93766f0f76-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.606779 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401501-ws7bm" event={"ID":"3e5c3bd3-4074-4f19-8810-2c93766f0f76","Type":"ContainerDied","Data":"440df12cb36898392a8ccf38f722760050e6e3d4a7cb75972d7f2bc8377835a5"} Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.606813 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="440df12cb36898392a8ccf38f722760050e6e3d4a7cb75972d7f2bc8377835a5" Nov 25 17:01:08 crc kubenswrapper[4800]: I1125 17:01:08.606875 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401501-ws7bm" Nov 25 17:01:11 crc kubenswrapper[4800]: I1125 17:01:11.786177 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:01:11 crc kubenswrapper[4800]: E1125 17:01:11.786947 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:01:23 crc kubenswrapper[4800]: I1125 17:01:23.785891 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:01:23 crc kubenswrapper[4800]: E1125 17:01:23.786625 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:01:35 crc kubenswrapper[4800]: I1125 17:01:35.785780 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:01:35 crc kubenswrapper[4800]: E1125 17:01:35.786623 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:01:47 crc kubenswrapper[4800]: I1125 17:01:47.786791 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:01:55 crc kubenswrapper[4800]: I1125 17:01:55.094038 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"a39e9cc8c6bd4cbb42d4f311c2264ca465b7b5094e371dd1898f17961ac816fb"} Nov 25 17:04:12 crc kubenswrapper[4800]: I1125 17:04:12.640196 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:04:12 crc kubenswrapper[4800]: I1125 17:04:12.640808 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:04:42 crc kubenswrapper[4800]: I1125 17:04:42.640103 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:04:42 crc kubenswrapper[4800]: I1125 17:04:42.640716 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:05:12 crc kubenswrapper[4800]: I1125 17:05:12.640563 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:05:12 crc kubenswrapper[4800]: I1125 17:05:12.641412 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:05:12 crc kubenswrapper[4800]: I1125 17:05:12.641487 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 17:05:12 crc kubenswrapper[4800]: I1125 17:05:12.642659 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a39e9cc8c6bd4cbb42d4f311c2264ca465b7b5094e371dd1898f17961ac816fb"} 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:05:12 crc kubenswrapper[4800]: I1125 17:05:12.642759 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://a39e9cc8c6bd4cbb42d4f311c2264ca465b7b5094e371dd1898f17961ac816fb" gracePeriod=600 Nov 25 17:05:13 crc kubenswrapper[4800]: I1125 17:05:13.063136 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="a39e9cc8c6bd4cbb42d4f311c2264ca465b7b5094e371dd1898f17961ac816fb" exitCode=0 Nov 25 17:05:13 crc kubenswrapper[4800]: I1125 17:05:13.063164 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"a39e9cc8c6bd4cbb42d4f311c2264ca465b7b5094e371dd1898f17961ac816fb"} Nov 25 17:05:13 crc kubenswrapper[4800]: I1125 17:05:13.063614 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a"} Nov 25 17:05:13 crc kubenswrapper[4800]: I1125 17:05:13.063651 4800 scope.go:117] "RemoveContainer" containerID="3cf97d45d46ceb016cb78dd88e45f0115a4fce6ee414af5da778e0c88290a490" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.447517 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tjrn9"] Nov 25 17:06:58 crc kubenswrapper[4800]: E1125 17:06:58.448625 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e5c3bd3-4074-4f19-8810-2c93766f0f76" containerName="keystone-cron" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.448642 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e5c3bd3-4074-4f19-8810-2c93766f0f76" containerName="keystone-cron" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.448896 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e5c3bd3-4074-4f19-8810-2c93766f0f76" containerName="keystone-cron" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.450362 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.464399 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tjrn9"] Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.554036 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-catalog-content\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.554789 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-utilities\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.554838 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx4lk\" (UniqueName: \"kubernetes.io/projected/00b1bdd3-6e63-46da-bcad-b59a9fca8554-kube-api-access-kx4lk\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.657081 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-catalog-content\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.657275 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-utilities\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.657338 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx4lk\" (UniqueName: \"kubernetes.io/projected/00b1bdd3-6e63-46da-bcad-b59a9fca8554-kube-api-access-kx4lk\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.658199 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-catalog-content\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.658287 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-utilities\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.683901 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kx4lk\" (UniqueName: \"kubernetes.io/projected/00b1bdd3-6e63-46da-bcad-b59a9fca8554-kube-api-access-kx4lk\") pod \"community-operators-tjrn9\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:58 crc kubenswrapper[4800]: I1125 17:06:58.788820 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:06:59 crc kubenswrapper[4800]: I1125 17:06:59.338223 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tjrn9"] Nov 25 17:06:59 crc kubenswrapper[4800]: W1125 17:06:59.350702 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00b1bdd3_6e63_46da_bcad_b59a9fca8554.slice/crio-89e8d58175e86e6f4bf67bd17d82440edcd9fc1550e21d4b7e695474ffe1bfe2 WatchSource:0}: Error finding container 89e8d58175e86e6f4bf67bd17d82440edcd9fc1550e21d4b7e695474ffe1bfe2: Status 404 returned error can't find the container with id 89e8d58175e86e6f4bf67bd17d82440edcd9fc1550e21d4b7e695474ffe1bfe2 Nov 25 17:07:00 crc kubenswrapper[4800]: I1125 17:07:00.130329 4800 generic.go:334] "Generic (PLEG): container finished" podID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerID="8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42" exitCode=0 Nov 25 17:07:00 crc kubenswrapper[4800]: I1125 17:07:00.130538 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tjrn9" event={"ID":"00b1bdd3-6e63-46da-bcad-b59a9fca8554","Type":"ContainerDied","Data":"8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42"} Nov 25 17:07:00 crc kubenswrapper[4800]: I1125 17:07:00.131006 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tjrn9" event={"ID":"00b1bdd3-6e63-46da-bcad-b59a9fca8554","Type":"ContainerStarted","Data":"89e8d58175e86e6f4bf67bd17d82440edcd9fc1550e21d4b7e695474ffe1bfe2"} Nov 25 17:07:00 crc kubenswrapper[4800]: I1125 17:07:00.135313 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.141556 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tjrn9" event={"ID":"00b1bdd3-6e63-46da-bcad-b59a9fca8554","Type":"ContainerStarted","Data":"a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90"} Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.433929 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6sdg6"] Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.435895 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.463980 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6sdg6"] Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.519138 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2dpv\" (UniqueName: \"kubernetes.io/projected/5f513b70-0094-4178-8d25-aefd4bb220f2-kube-api-access-t2dpv\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.519275 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-utilities\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.519305 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-catalog-content\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.621517 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-utilities\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.621596 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-catalog-content\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.621716 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2dpv\" (UniqueName: \"kubernetes.io/projected/5f513b70-0094-4178-8d25-aefd4bb220f2-kube-api-access-t2dpv\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.622084 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-catalog-content\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.622309 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-utilities\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.653273 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-t2dpv\" (UniqueName: \"kubernetes.io/projected/5f513b70-0094-4178-8d25-aefd4bb220f2-kube-api-access-t2dpv\") pod \"redhat-operators-6sdg6\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:01 crc kubenswrapper[4800]: I1125 17:07:01.764447 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:02 crc kubenswrapper[4800]: I1125 17:07:02.290429 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6sdg6"] Nov 25 17:07:02 crc kubenswrapper[4800]: W1125 17:07:02.296762 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f513b70_0094_4178_8d25_aefd4bb220f2.slice/crio-beef8c76b4b8184e9e6f672f37ad3c0e871114d6173c2b182dbfe389391d21eb WatchSource:0}: Error finding container beef8c76b4b8184e9e6f672f37ad3c0e871114d6173c2b182dbfe389391d21eb: Status 404 returned error can't find the container with id beef8c76b4b8184e9e6f672f37ad3c0e871114d6173c2b182dbfe389391d21eb Nov 25 17:07:02 crc kubenswrapper[4800]: E1125 17:07:02.641974 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00b1bdd3_6e63_46da_bcad_b59a9fca8554.slice/crio-a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00b1bdd3_6e63_46da_bcad_b59a9fca8554.slice/crio-conmon-a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:07:03 crc kubenswrapper[4800]: I1125 17:07:03.160809 4800 generic.go:334] "Generic (PLEG): container finished" podID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerID="0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f" exitCode=0 Nov 25 17:07:03 crc kubenswrapper[4800]: I1125 17:07:03.160879 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sdg6" event={"ID":"5f513b70-0094-4178-8d25-aefd4bb220f2","Type":"ContainerDied","Data":"0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f"} Nov 25 17:07:03 crc kubenswrapper[4800]: I1125 17:07:03.161158 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sdg6" event={"ID":"5f513b70-0094-4178-8d25-aefd4bb220f2","Type":"ContainerStarted","Data":"beef8c76b4b8184e9e6f672f37ad3c0e871114d6173c2b182dbfe389391d21eb"} Nov 25 17:07:03 crc kubenswrapper[4800]: I1125 17:07:03.163640 4800 generic.go:334] "Generic (PLEG): container finished" podID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerID="a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90" exitCode=0 Nov 25 17:07:03 crc kubenswrapper[4800]: I1125 17:07:03.163669 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tjrn9" event={"ID":"00b1bdd3-6e63-46da-bcad-b59a9fca8554","Type":"ContainerDied","Data":"a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90"} Nov 25 17:07:04 crc kubenswrapper[4800]: I1125 17:07:04.174473 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sdg6" 
event={"ID":"5f513b70-0094-4178-8d25-aefd4bb220f2","Type":"ContainerStarted","Data":"ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390"} Nov 25 17:07:04 crc kubenswrapper[4800]: I1125 17:07:04.178379 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tjrn9" event={"ID":"00b1bdd3-6e63-46da-bcad-b59a9fca8554","Type":"ContainerStarted","Data":"dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440"} Nov 25 17:07:04 crc kubenswrapper[4800]: I1125 17:07:04.232166 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tjrn9" podStartSLOduration=2.821531708 podStartE2EDuration="6.232143915s" podCreationTimestamp="2025-11-25 17:06:58 +0000 UTC" firstStartedPulling="2025-11-25 17:07:00.134915834 +0000 UTC m=+6581.189324346" lastFinishedPulling="2025-11-25 17:07:03.545528061 +0000 UTC m=+6584.599936553" observedRunningTime="2025-11-25 17:07:04.220747424 +0000 UTC m=+6585.275155936" watchObservedRunningTime="2025-11-25 17:07:04.232143915 +0000 UTC m=+6585.286552407" Nov 25 17:07:08 crc kubenswrapper[4800]: I1125 17:07:08.789385 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:07:08 crc kubenswrapper[4800]: I1125 17:07:08.790135 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:07:08 crc kubenswrapper[4800]: I1125 17:07:08.880120 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:07:09 crc kubenswrapper[4800]: I1125 17:07:09.302800 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:07:09 crc kubenswrapper[4800]: I1125 17:07:09.834270 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tjrn9"] Nov 25 17:07:10 crc kubenswrapper[4800]: I1125 17:07:10.242257 4800 generic.go:334] "Generic (PLEG): container finished" podID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerID="ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390" exitCode=0 Nov 25 17:07:10 crc kubenswrapper[4800]: I1125 17:07:10.242407 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sdg6" event={"ID":"5f513b70-0094-4178-8d25-aefd4bb220f2","Type":"ContainerDied","Data":"ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390"} Nov 25 17:07:11 crc kubenswrapper[4800]: I1125 17:07:11.252643 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sdg6" event={"ID":"5f513b70-0094-4178-8d25-aefd4bb220f2","Type":"ContainerStarted","Data":"0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828"} Nov 25 17:07:11 crc kubenswrapper[4800]: I1125 17:07:11.252783 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tjrn9" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="registry-server" containerID="cri-o://dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440" gracePeriod=2 Nov 25 17:07:11 crc kubenswrapper[4800]: I1125 17:07:11.282914 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6sdg6" podStartSLOduration=2.5702398459999998 
podStartE2EDuration="10.282889936s" podCreationTimestamp="2025-11-25 17:07:01 +0000 UTC" firstStartedPulling="2025-11-25 17:07:03.163571364 +0000 UTC m=+6584.217979846" lastFinishedPulling="2025-11-25 17:07:10.876221424 +0000 UTC m=+6591.930629936" observedRunningTime="2025-11-25 17:07:11.277145559 +0000 UTC m=+6592.331554041" watchObservedRunningTime="2025-11-25 17:07:11.282889936 +0000 UTC m=+6592.337298438" Nov 25 17:07:11 crc kubenswrapper[4800]: I1125 17:07:11.764720 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:11 crc kubenswrapper[4800]: I1125 17:07:11.765157 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:11 crc kubenswrapper[4800]: I1125 17:07:11.934969 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.073199 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-utilities\") pod \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.073285 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kx4lk\" (UniqueName: \"kubernetes.io/projected/00b1bdd3-6e63-46da-bcad-b59a9fca8554-kube-api-access-kx4lk\") pod \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.073332 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-catalog-content\") pod \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\" (UID: \"00b1bdd3-6e63-46da-bcad-b59a9fca8554\") " Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.077611 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-utilities" (OuterVolumeSpecName: "utilities") pod "00b1bdd3-6e63-46da-bcad-b59a9fca8554" (UID: "00b1bdd3-6e63-46da-bcad-b59a9fca8554"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.094135 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00b1bdd3-6e63-46da-bcad-b59a9fca8554-kube-api-access-kx4lk" (OuterVolumeSpecName: "kube-api-access-kx4lk") pod "00b1bdd3-6e63-46da-bcad-b59a9fca8554" (UID: "00b1bdd3-6e63-46da-bcad-b59a9fca8554"). InnerVolumeSpecName "kube-api-access-kx4lk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.129549 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00b1bdd3-6e63-46da-bcad-b59a9fca8554" (UID: "00b1bdd3-6e63-46da-bcad-b59a9fca8554"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.177756 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.177791 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kx4lk\" (UniqueName: \"kubernetes.io/projected/00b1bdd3-6e63-46da-bcad-b59a9fca8554-kube-api-access-kx4lk\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.177802 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b1bdd3-6e63-46da-bcad-b59a9fca8554-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.264620 4800 generic.go:334] "Generic (PLEG): container finished" podID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerID="dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440" exitCode=0 Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.264682 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tjrn9" event={"ID":"00b1bdd3-6e63-46da-bcad-b59a9fca8554","Type":"ContainerDied","Data":"dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440"} Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.264740 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tjrn9" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.264760 4800 scope.go:117] "RemoveContainer" containerID="dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.264743 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tjrn9" event={"ID":"00b1bdd3-6e63-46da-bcad-b59a9fca8554","Type":"ContainerDied","Data":"89e8d58175e86e6f4bf67bd17d82440edcd9fc1550e21d4b7e695474ffe1bfe2"} Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.298802 4800 scope.go:117] "RemoveContainer" containerID="a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.321219 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tjrn9"] Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.334233 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tjrn9"] Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.340319 4800 scope.go:117] "RemoveContainer" containerID="8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.375341 4800 scope.go:117] "RemoveContainer" containerID="dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440" Nov 25 17:07:12 crc kubenswrapper[4800]: E1125 17:07:12.376125 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440\": container with ID starting with dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440 not found: ID does not exist" containerID="dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.376155 
4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440"} err="failed to get container status \"dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440\": rpc error: code = NotFound desc = could not find container \"dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440\": container with ID starting with dfb3a94e5530297d34dcd6d2d7e4219e4568e9c3a3f0894eb9f68690c12ad440 not found: ID does not exist" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.376176 4800 scope.go:117] "RemoveContainer" containerID="a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90" Nov 25 17:07:12 crc kubenswrapper[4800]: E1125 17:07:12.376514 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90\": container with ID starting with a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90 not found: ID does not exist" containerID="a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.376534 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90"} err="failed to get container status \"a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90\": rpc error: code = NotFound desc = could not find container \"a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90\": container with ID starting with a27bd96a5648de971477e2bfdecafd1815261e39b059e6a81de13667a7585c90 not found: ID does not exist" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.376549 4800 scope.go:117] "RemoveContainer" containerID="8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42" Nov 25 17:07:12 crc kubenswrapper[4800]: E1125 17:07:12.376916 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42\": container with ID starting with 8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42 not found: ID does not exist" containerID="8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.376962 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42"} err="failed to get container status \"8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42\": rpc error: code = NotFound desc = could not find container \"8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42\": container with ID starting with 8e1a70fb5c0bafad4fa73d79efdc238bf430fcbc0d6259cf1e001d14d1731b42 not found: ID does not exist" Nov 25 17:07:12 crc kubenswrapper[4800]: I1125 17:07:12.811361 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6sdg6" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="registry-server" probeResult="failure" output=< Nov 25 17:07:12 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 17:07:12 crc kubenswrapper[4800]: > Nov 25 17:07:13 crc kubenswrapper[4800]: I1125 17:07:13.797182 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" path="/var/lib/kubelet/pods/00b1bdd3-6e63-46da-bcad-b59a9fca8554/volumes" Nov 25 17:07:21 crc kubenswrapper[4800]: I1125 17:07:21.842680 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:21 crc kubenswrapper[4800]: I1125 17:07:21.909667 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:22 crc kubenswrapper[4800]: I1125 17:07:22.085938 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6sdg6"] Nov 25 17:07:23 crc kubenswrapper[4800]: I1125 17:07:23.376821 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6sdg6" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="registry-server" containerID="cri-o://0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828" gracePeriod=2 Nov 25 17:07:23 crc kubenswrapper[4800]: I1125 17:07:23.959861 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.135347 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-utilities\") pod \"5f513b70-0094-4178-8d25-aefd4bb220f2\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.135423 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-catalog-content\") pod \"5f513b70-0094-4178-8d25-aefd4bb220f2\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.135572 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2dpv\" (UniqueName: \"kubernetes.io/projected/5f513b70-0094-4178-8d25-aefd4bb220f2-kube-api-access-t2dpv\") pod \"5f513b70-0094-4178-8d25-aefd4bb220f2\" (UID: \"5f513b70-0094-4178-8d25-aefd4bb220f2\") " Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.136623 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-utilities" (OuterVolumeSpecName: "utilities") pod "5f513b70-0094-4178-8d25-aefd4bb220f2" (UID: "5f513b70-0094-4178-8d25-aefd4bb220f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.142080 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f513b70-0094-4178-8d25-aefd4bb220f2-kube-api-access-t2dpv" (OuterVolumeSpecName: "kube-api-access-t2dpv") pod "5f513b70-0094-4178-8d25-aefd4bb220f2" (UID: "5f513b70-0094-4178-8d25-aefd4bb220f2"). InnerVolumeSpecName "kube-api-access-t2dpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.226121 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f513b70-0094-4178-8d25-aefd4bb220f2" (UID: "5f513b70-0094-4178-8d25-aefd4bb220f2"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.237914 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.237969 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f513b70-0094-4178-8d25-aefd4bb220f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.237982 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2dpv\" (UniqueName: \"kubernetes.io/projected/5f513b70-0094-4178-8d25-aefd4bb220f2-kube-api-access-t2dpv\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.394973 4800 generic.go:334] "Generic (PLEG): container finished" podID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerID="0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828" exitCode=0 Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.395131 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sdg6" event={"ID":"5f513b70-0094-4178-8d25-aefd4bb220f2","Type":"ContainerDied","Data":"0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828"} Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.395327 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6sdg6" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.396655 4800 scope.go:117] "RemoveContainer" containerID="0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.396541 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sdg6" event={"ID":"5f513b70-0094-4178-8d25-aefd4bb220f2","Type":"ContainerDied","Data":"beef8c76b4b8184e9e6f672f37ad3c0e871114d6173c2b182dbfe389391d21eb"} Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.454873 4800 scope.go:117] "RemoveContainer" containerID="ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.454947 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6sdg6"] Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.465911 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6sdg6"] Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.491631 4800 scope.go:117] "RemoveContainer" containerID="0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.537710 4800 scope.go:117] "RemoveContainer" containerID="0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828" Nov 25 17:07:24 crc kubenswrapper[4800]: E1125 17:07:24.538291 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828\": container with ID starting with 0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828 not found: ID does not exist" containerID="0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828" Nov 25 17:07:24 crc kubenswrapper[4800]: 
I1125 17:07:24.538331 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828"} err="failed to get container status \"0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828\": rpc error: code = NotFound desc = could not find container \"0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828\": container with ID starting with 0151980a6bcecc20d3a16c41c69f7bc23e46b07b4e2cf367b4e3770e33d15828 not found: ID does not exist" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.538359 4800 scope.go:117] "RemoveContainer" containerID="ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390" Nov 25 17:07:24 crc kubenswrapper[4800]: E1125 17:07:24.538948 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390\": container with ID starting with ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390 not found: ID does not exist" containerID="ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.539011 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390"} err="failed to get container status \"ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390\": rpc error: code = NotFound desc = could not find container \"ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390\": container with ID starting with ae1addcffe968fca059f4f54ec3393fccb31040c0abbaa50d1bcaae2c0a8d390 not found: ID does not exist" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.539036 4800 scope.go:117] "RemoveContainer" containerID="0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f" Nov 25 17:07:24 crc kubenswrapper[4800]: E1125 17:07:24.539302 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f\": container with ID starting with 0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f not found: ID does not exist" containerID="0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f" Nov 25 17:07:24 crc kubenswrapper[4800]: I1125 17:07:24.539326 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f"} err="failed to get container status \"0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f\": rpc error: code = NotFound desc = could not find container \"0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f\": container with ID starting with 0a55f747a833173c16b4c215b7de65b4ee4e9b0a44530ee2219d0a686d6f0a7f not found: ID does not exist" Nov 25 17:07:25 crc kubenswrapper[4800]: I1125 17:07:25.798948 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" path="/var/lib/kubelet/pods/5f513b70-0094-4178-8d25-aefd4bb220f2/volumes" Nov 25 17:07:42 crc kubenswrapper[4800]: I1125 17:07:42.640948 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:07:42 crc kubenswrapper[4800]: I1125 17:07:42.641997 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:08:12 crc kubenswrapper[4800]: I1125 17:08:12.640355 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:08:12 crc kubenswrapper[4800]: I1125 17:08:12.641093 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:08:42 crc kubenswrapper[4800]: I1125 17:08:42.640286 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:08:42 crc kubenswrapper[4800]: I1125 17:08:42.640965 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:08:42 crc kubenswrapper[4800]: I1125 17:08:42.641010 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 17:08:42 crc kubenswrapper[4800]: I1125 17:08:42.641808 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:08:42 crc kubenswrapper[4800]: I1125 17:08:42.641895 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" gracePeriod=600 Nov 25 17:08:42 crc kubenswrapper[4800]: E1125 17:08:42.772105 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:08:43 crc 
kubenswrapper[4800]: I1125 17:08:43.155444 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" exitCode=0 Nov 25 17:08:43 crc kubenswrapper[4800]: I1125 17:08:43.155486 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a"} Nov 25 17:08:43 crc kubenswrapper[4800]: I1125 17:08:43.155521 4800 scope.go:117] "RemoveContainer" containerID="a39e9cc8c6bd4cbb42d4f311c2264ca465b7b5094e371dd1898f17961ac816fb" Nov 25 17:08:43 crc kubenswrapper[4800]: I1125 17:08:43.156378 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:08:43 crc kubenswrapper[4800]: E1125 17:08:43.156817 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:08:58 crc kubenswrapper[4800]: I1125 17:08:58.785754 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:08:58 crc kubenswrapper[4800]: E1125 17:08:58.786798 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:09:12 crc kubenswrapper[4800]: I1125 17:09:12.785005 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:09:12 crc kubenswrapper[4800]: E1125 17:09:12.785896 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:09:27 crc kubenswrapper[4800]: I1125 17:09:27.787762 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:09:27 crc kubenswrapper[4800]: E1125 17:09:27.789272 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:09:42 crc kubenswrapper[4800]: I1125 17:09:42.786301 4800 scope.go:117] "RemoveContainer" 
containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:09:42 crc kubenswrapper[4800]: E1125 17:09:42.787365 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.972247 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xxr2d"] Nov 25 17:09:49 crc kubenswrapper[4800]: E1125 17:09:49.973403 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="extract-content" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973423 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="extract-content" Nov 25 17:09:49 crc kubenswrapper[4800]: E1125 17:09:49.973436 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="extract-content" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973445 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="extract-content" Nov 25 17:09:49 crc kubenswrapper[4800]: E1125 17:09:49.973464 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="registry-server" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973472 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="registry-server" Nov 25 17:09:49 crc kubenswrapper[4800]: E1125 17:09:49.973492 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="registry-server" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973500 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="registry-server" Nov 25 17:09:49 crc kubenswrapper[4800]: E1125 17:09:49.973510 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="extract-utilities" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973518 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="extract-utilities" Nov 25 17:09:49 crc kubenswrapper[4800]: E1125 17:09:49.973534 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="extract-utilities" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973543 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="extract-utilities" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973776 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="00b1bdd3-6e63-46da-bcad-b59a9fca8554" containerName="registry-server" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.973808 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f513b70-0094-4178-8d25-aefd4bb220f2" containerName="registry-server" Nov 25 17:09:49 
crc kubenswrapper[4800]: I1125 17:09:49.976010 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:49 crc kubenswrapper[4800]: I1125 17:09:49.981700 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xxr2d"] Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.088142 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-catalog-content\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.088194 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-utilities\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.088306 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvbtt\" (UniqueName: \"kubernetes.io/projected/49fdaf37-575d-4ea7-8028-2d636f36b582-kube-api-access-cvbtt\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.190405 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-catalog-content\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.190450 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-utilities\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.190512 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvbtt\" (UniqueName: \"kubernetes.io/projected/49fdaf37-575d-4ea7-8028-2d636f36b582-kube-api-access-cvbtt\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.191036 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-catalog-content\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.191163 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-utilities\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 
crc kubenswrapper[4800]: I1125 17:09:50.225984 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvbtt\" (UniqueName: \"kubernetes.io/projected/49fdaf37-575d-4ea7-8028-2d636f36b582-kube-api-access-cvbtt\") pod \"redhat-marketplace-xxr2d\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.307761 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.765143 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xxr2d"] Nov 25 17:09:50 crc kubenswrapper[4800]: I1125 17:09:50.787747 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xxr2d" event={"ID":"49fdaf37-575d-4ea7-8028-2d636f36b582","Type":"ContainerStarted","Data":"7e36cf9a5016930660c818515691064fd52fe5c0700079947f2b1256c74367da"} Nov 25 17:09:51 crc kubenswrapper[4800]: I1125 17:09:51.799244 4800 generic.go:334] "Generic (PLEG): container finished" podID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerID="0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606" exitCode=0 Nov 25 17:09:51 crc kubenswrapper[4800]: I1125 17:09:51.799673 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xxr2d" event={"ID":"49fdaf37-575d-4ea7-8028-2d636f36b582","Type":"ContainerDied","Data":"0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606"} Nov 25 17:09:52 crc kubenswrapper[4800]: I1125 17:09:52.807899 4800 generic.go:334] "Generic (PLEG): container finished" podID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerID="2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b" exitCode=0 Nov 25 17:09:52 crc kubenswrapper[4800]: I1125 17:09:52.808093 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xxr2d" event={"ID":"49fdaf37-575d-4ea7-8028-2d636f36b582","Type":"ContainerDied","Data":"2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b"} Nov 25 17:09:53 crc kubenswrapper[4800]: I1125 17:09:53.819621 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xxr2d" event={"ID":"49fdaf37-575d-4ea7-8028-2d636f36b582","Type":"ContainerStarted","Data":"05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce"} Nov 25 17:09:53 crc kubenswrapper[4800]: I1125 17:09:53.844619 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xxr2d" podStartSLOduration=3.422377607 podStartE2EDuration="4.844603693s" podCreationTimestamp="2025-11-25 17:09:49 +0000 UTC" firstStartedPulling="2025-11-25 17:09:51.802201547 +0000 UTC m=+6752.856610029" lastFinishedPulling="2025-11-25 17:09:53.224427633 +0000 UTC m=+6754.278836115" observedRunningTime="2025-11-25 17:09:53.843898854 +0000 UTC m=+6754.898307346" watchObservedRunningTime="2025-11-25 17:09:53.844603693 +0000 UTC m=+6754.899012175" Nov 25 17:09:55 crc kubenswrapper[4800]: I1125 17:09:55.785951 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:09:55 crc kubenswrapper[4800]: E1125 17:09:55.786567 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:10:00 crc kubenswrapper[4800]: I1125 17:10:00.308104 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:10:00 crc kubenswrapper[4800]: I1125 17:10:00.308536 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:10:00 crc kubenswrapper[4800]: I1125 17:10:00.377570 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:10:00 crc kubenswrapper[4800]: I1125 17:10:00.968027 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:10:01 crc kubenswrapper[4800]: I1125 17:10:01.022748 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xxr2d"] Nov 25 17:10:02 crc kubenswrapper[4800]: I1125 17:10:02.932382 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xxr2d" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="registry-server" containerID="cri-o://05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce" gracePeriod=2 Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.498960 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.610209 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-utilities\") pod \"49fdaf37-575d-4ea7-8028-2d636f36b582\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.610278 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-catalog-content\") pod \"49fdaf37-575d-4ea7-8028-2d636f36b582\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.611302 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-utilities" (OuterVolumeSpecName: "utilities") pod "49fdaf37-575d-4ea7-8028-2d636f36b582" (UID: "49fdaf37-575d-4ea7-8028-2d636f36b582"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.610832 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvbtt\" (UniqueName: \"kubernetes.io/projected/49fdaf37-575d-4ea7-8028-2d636f36b582-kube-api-access-cvbtt\") pod \"49fdaf37-575d-4ea7-8028-2d636f36b582\" (UID: \"49fdaf37-575d-4ea7-8028-2d636f36b582\") " Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.612058 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.617444 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49fdaf37-575d-4ea7-8028-2d636f36b582-kube-api-access-cvbtt" (OuterVolumeSpecName: "kube-api-access-cvbtt") pod "49fdaf37-575d-4ea7-8028-2d636f36b582" (UID: "49fdaf37-575d-4ea7-8028-2d636f36b582"). InnerVolumeSpecName "kube-api-access-cvbtt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.635640 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "49fdaf37-575d-4ea7-8028-2d636f36b582" (UID: "49fdaf37-575d-4ea7-8028-2d636f36b582"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.714492 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49fdaf37-575d-4ea7-8028-2d636f36b582-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.714537 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvbtt\" (UniqueName: \"kubernetes.io/projected/49fdaf37-575d-4ea7-8028-2d636f36b582-kube-api-access-cvbtt\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.941614 4800 generic.go:334] "Generic (PLEG): container finished" podID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerID="05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce" exitCode=0 Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.941653 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xxr2d" event={"ID":"49fdaf37-575d-4ea7-8028-2d636f36b582","Type":"ContainerDied","Data":"05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce"} Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.941678 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xxr2d" event={"ID":"49fdaf37-575d-4ea7-8028-2d636f36b582","Type":"ContainerDied","Data":"7e36cf9a5016930660c818515691064fd52fe5c0700079947f2b1256c74367da"} Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.941694 4800 scope.go:117] "RemoveContainer" containerID="05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.941798 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xxr2d" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.972658 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xxr2d"] Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.975719 4800 scope.go:117] "RemoveContainer" containerID="2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b" Nov 25 17:10:03 crc kubenswrapper[4800]: I1125 17:10:03.982370 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xxr2d"] Nov 25 17:10:04 crc kubenswrapper[4800]: I1125 17:10:03.999421 4800 scope.go:117] "RemoveContainer" containerID="0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606" Nov 25 17:10:04 crc kubenswrapper[4800]: I1125 17:10:04.042035 4800 scope.go:117] "RemoveContainer" containerID="05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce" Nov 25 17:10:04 crc kubenswrapper[4800]: E1125 17:10:04.042492 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce\": container with ID starting with 05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce not found: ID does not exist" containerID="05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce" Nov 25 17:10:04 crc kubenswrapper[4800]: I1125 17:10:04.042536 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce"} err="failed to get container status \"05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce\": rpc error: code = NotFound desc = could not find container \"05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce\": container with ID starting with 05a705d4bd4fcb4101a7a01008d339c86e81651fa0a3d74baf047f1dbc88d5ce not found: ID does not exist" Nov 25 17:10:04 crc kubenswrapper[4800]: I1125 17:10:04.042563 4800 scope.go:117] "RemoveContainer" containerID="2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b" Nov 25 17:10:04 crc kubenswrapper[4800]: E1125 17:10:04.042987 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b\": container with ID starting with 2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b not found: ID does not exist" containerID="2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b" Nov 25 17:10:04 crc kubenswrapper[4800]: I1125 17:10:04.043034 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b"} err="failed to get container status \"2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b\": rpc error: code = NotFound desc = could not find container \"2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b\": container with ID starting with 2b46b54ff1bea79d4d45199f920a5cea8bc41ef35eab28341441c4a3d2b6457b not found: ID does not exist" Nov 25 17:10:04 crc kubenswrapper[4800]: I1125 17:10:04.043095 4800 scope.go:117] "RemoveContainer" containerID="0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606" Nov 25 17:10:04 crc kubenswrapper[4800]: E1125 17:10:04.043422 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606\": container with ID starting with 0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606 not found: ID does not exist" containerID="0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606" Nov 25 17:10:04 crc kubenswrapper[4800]: I1125 17:10:04.043450 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606"} err="failed to get container status \"0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606\": rpc error: code = NotFound desc = could not find container \"0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606\": container with ID starting with 0a81435792c4a7b242c1bbda71dc9bc52549d43af02e5e0117493df2aac25606 not found: ID does not exist" Nov 25 17:10:05 crc kubenswrapper[4800]: I1125 17:10:05.805059 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" path="/var/lib/kubelet/pods/49fdaf37-575d-4ea7-8028-2d636f36b582/volumes" Nov 25 17:10:10 crc kubenswrapper[4800]: I1125 17:10:10.786817 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:10:10 crc kubenswrapper[4800]: E1125 17:10:10.788293 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:10:24 crc kubenswrapper[4800]: I1125 17:10:24.786188 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:10:24 crc kubenswrapper[4800]: E1125 17:10:24.787328 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.620702 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pdrjm"] Nov 25 17:10:26 crc kubenswrapper[4800]: E1125 17:10:26.621749 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="extract-utilities" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.621766 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="extract-utilities" Nov 25 17:10:26 crc kubenswrapper[4800]: E1125 17:10:26.621779 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="registry-server" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.621785 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="registry-server" Nov 25 17:10:26 crc kubenswrapper[4800]: E1125 
17:10:26.621800 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="extract-content" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.621808 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="extract-content" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.622063 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="49fdaf37-575d-4ea7-8028-2d636f36b582" containerName="registry-server" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.623443 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.637385 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdrjm"] Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.702115 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hwp9\" (UniqueName: \"kubernetes.io/projected/7b3dd583-87db-4702-809e-3ee250ded195-kube-api-access-6hwp9\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.702155 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-catalog-content\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.702182 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-utilities\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.804097 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hwp9\" (UniqueName: \"kubernetes.io/projected/7b3dd583-87db-4702-809e-3ee250ded195-kube-api-access-6hwp9\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.804146 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-catalog-content\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.804174 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-utilities\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.804712 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-catalog-content\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.804779 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-utilities\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.825301 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hwp9\" (UniqueName: \"kubernetes.io/projected/7b3dd583-87db-4702-809e-3ee250ded195-kube-api-access-6hwp9\") pod \"certified-operators-pdrjm\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:26 crc kubenswrapper[4800]: I1125 17:10:26.942993 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:27 crc kubenswrapper[4800]: I1125 17:10:27.484461 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdrjm"] Nov 25 17:10:27 crc kubenswrapper[4800]: E1125 17:10:27.926593 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b3dd583_87db_4702_809e_3ee250ded195.slice/crio-conmon-7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:10:28 crc kubenswrapper[4800]: I1125 17:10:28.175765 4800 generic.go:334] "Generic (PLEG): container finished" podID="7b3dd583-87db-4702-809e-3ee250ded195" containerID="7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d" exitCode=0 Nov 25 17:10:28 crc kubenswrapper[4800]: I1125 17:10:28.175824 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdrjm" event={"ID":"7b3dd583-87db-4702-809e-3ee250ded195","Type":"ContainerDied","Data":"7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d"} Nov 25 17:10:28 crc kubenswrapper[4800]: I1125 17:10:28.175890 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdrjm" event={"ID":"7b3dd583-87db-4702-809e-3ee250ded195","Type":"ContainerStarted","Data":"12de7c90ed7a8ac534aa94eb7aa610f1a4925bd0d86048d506d61d68533c8912"} Nov 25 17:10:30 crc kubenswrapper[4800]: I1125 17:10:30.196280 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdrjm" event={"ID":"7b3dd583-87db-4702-809e-3ee250ded195","Type":"ContainerStarted","Data":"f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681"} Nov 25 17:10:31 crc kubenswrapper[4800]: I1125 17:10:31.207194 4800 generic.go:334] "Generic (PLEG): container finished" podID="7b3dd583-87db-4702-809e-3ee250ded195" containerID="f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681" exitCode=0 Nov 25 17:10:31 crc kubenswrapper[4800]: I1125 17:10:31.207292 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdrjm" 
event={"ID":"7b3dd583-87db-4702-809e-3ee250ded195","Type":"ContainerDied","Data":"f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681"} Nov 25 17:10:32 crc kubenswrapper[4800]: I1125 17:10:32.217703 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdrjm" event={"ID":"7b3dd583-87db-4702-809e-3ee250ded195","Type":"ContainerStarted","Data":"9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c"} Nov 25 17:10:32 crc kubenswrapper[4800]: I1125 17:10:32.242026 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pdrjm" podStartSLOduration=2.775061559 podStartE2EDuration="6.242002565s" podCreationTimestamp="2025-11-25 17:10:26 +0000 UTC" firstStartedPulling="2025-11-25 17:10:28.179230463 +0000 UTC m=+6789.233638945" lastFinishedPulling="2025-11-25 17:10:31.646171429 +0000 UTC m=+6792.700579951" observedRunningTime="2025-11-25 17:10:32.236283229 +0000 UTC m=+6793.290691721" watchObservedRunningTime="2025-11-25 17:10:32.242002565 +0000 UTC m=+6793.296411047" Nov 25 17:10:35 crc kubenswrapper[4800]: I1125 17:10:35.786174 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:10:35 crc kubenswrapper[4800]: E1125 17:10:35.787246 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:10:36 crc kubenswrapper[4800]: I1125 17:10:36.943464 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:36 crc kubenswrapper[4800]: I1125 17:10:36.943769 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:37 crc kubenswrapper[4800]: I1125 17:10:37.004768 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:37 crc kubenswrapper[4800]: I1125 17:10:37.308590 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:37 crc kubenswrapper[4800]: I1125 17:10:37.373441 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdrjm"] Nov 25 17:10:39 crc kubenswrapper[4800]: I1125 17:10:39.307146 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pdrjm" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="registry-server" containerID="cri-o://9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c" gracePeriod=2 Nov 25 17:10:39 crc kubenswrapper[4800]: I1125 17:10:39.847787 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:39 crc kubenswrapper[4800]: I1125 17:10:39.980107 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-catalog-content\") pod \"7b3dd583-87db-4702-809e-3ee250ded195\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " Nov 25 17:10:39 crc kubenswrapper[4800]: I1125 17:10:39.980283 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hwp9\" (UniqueName: \"kubernetes.io/projected/7b3dd583-87db-4702-809e-3ee250ded195-kube-api-access-6hwp9\") pod \"7b3dd583-87db-4702-809e-3ee250ded195\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " Nov 25 17:10:39 crc kubenswrapper[4800]: I1125 17:10:39.980393 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-utilities\") pod \"7b3dd583-87db-4702-809e-3ee250ded195\" (UID: \"7b3dd583-87db-4702-809e-3ee250ded195\") " Nov 25 17:10:39 crc kubenswrapper[4800]: I1125 17:10:39.981411 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-utilities" (OuterVolumeSpecName: "utilities") pod "7b3dd583-87db-4702-809e-3ee250ded195" (UID: "7b3dd583-87db-4702-809e-3ee250ded195"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:10:39 crc kubenswrapper[4800]: I1125 17:10:39.994283 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b3dd583-87db-4702-809e-3ee250ded195-kube-api-access-6hwp9" (OuterVolumeSpecName: "kube-api-access-6hwp9") pod "7b3dd583-87db-4702-809e-3ee250ded195" (UID: "7b3dd583-87db-4702-809e-3ee250ded195"). InnerVolumeSpecName "kube-api-access-6hwp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.074787 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b3dd583-87db-4702-809e-3ee250ded195" (UID: "7b3dd583-87db-4702-809e-3ee250ded195"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.083122 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.083470 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hwp9\" (UniqueName: \"kubernetes.io/projected/7b3dd583-87db-4702-809e-3ee250ded195-kube-api-access-6hwp9\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.083585 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b3dd583-87db-4702-809e-3ee250ded195-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.320718 4800 generic.go:334] "Generic (PLEG): container finished" podID="7b3dd583-87db-4702-809e-3ee250ded195" containerID="9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c" exitCode=0 Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.320757 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdrjm" event={"ID":"7b3dd583-87db-4702-809e-3ee250ded195","Type":"ContainerDied","Data":"9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c"} Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.320783 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdrjm" event={"ID":"7b3dd583-87db-4702-809e-3ee250ded195","Type":"ContainerDied","Data":"12de7c90ed7a8ac534aa94eb7aa610f1a4925bd0d86048d506d61d68533c8912"} Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.320800 4800 scope.go:117] "RemoveContainer" containerID="9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.320884 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pdrjm" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.374206 4800 scope.go:117] "RemoveContainer" containerID="f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.388943 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdrjm"] Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.397641 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pdrjm"] Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.409274 4800 scope.go:117] "RemoveContainer" containerID="7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.458403 4800 scope.go:117] "RemoveContainer" containerID="9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c" Nov 25 17:10:40 crc kubenswrapper[4800]: E1125 17:10:40.459000 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c\": container with ID starting with 9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c not found: ID does not exist" containerID="9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.459051 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c"} err="failed to get container status \"9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c\": rpc error: code = NotFound desc = could not find container \"9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c\": container with ID starting with 9604fd8b47d3fd774e6c771af2312814736bbc1e7b8eb010c561e8ef467c803c not found: ID does not exist" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.459084 4800 scope.go:117] "RemoveContainer" containerID="f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681" Nov 25 17:10:40 crc kubenswrapper[4800]: E1125 17:10:40.460128 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681\": container with ID starting with f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681 not found: ID does not exist" containerID="f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.460178 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681"} err="failed to get container status \"f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681\": rpc error: code = NotFound desc = could not find container \"f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681\": container with ID starting with f331255777b44ab2decf67ddca3becdad858be24244140b1bdfb535f1d112681 not found: ID does not exist" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.460209 4800 scope.go:117] "RemoveContainer" containerID="7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d" Nov 25 17:10:40 crc kubenswrapper[4800]: E1125 17:10:40.461188 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d\": container with ID starting with 7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d not found: ID does not exist" containerID="7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d" Nov 25 17:10:40 crc kubenswrapper[4800]: I1125 17:10:40.461227 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d"} err="failed to get container status \"7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d\": rpc error: code = NotFound desc = could not find container \"7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d\": container with ID starting with 7be1ee778d590253af4507ce5640b8893bd8283683fe0a855df9d2f93baf5d2d not found: ID does not exist" Nov 25 17:10:41 crc kubenswrapper[4800]: I1125 17:10:41.799660 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b3dd583-87db-4702-809e-3ee250ded195" path="/var/lib/kubelet/pods/7b3dd583-87db-4702-809e-3ee250ded195/volumes" Nov 25 17:10:49 crc kubenswrapper[4800]: I1125 17:10:49.800267 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:10:49 crc kubenswrapper[4800]: E1125 17:10:49.801549 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:11:00 crc kubenswrapper[4800]: I1125 17:11:00.785472 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:11:00 crc kubenswrapper[4800]: E1125 17:11:00.786548 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:11:11 crc kubenswrapper[4800]: I1125 17:11:11.785775 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:11:11 crc kubenswrapper[4800]: E1125 17:11:11.786786 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:11:26 crc kubenswrapper[4800]: I1125 17:11:26.785839 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:11:26 crc kubenswrapper[4800]: E1125 17:11:26.786706 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:11:39 crc kubenswrapper[4800]: I1125 17:11:39.785484 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:11:39 crc kubenswrapper[4800]: E1125 17:11:39.786365 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:11:51 crc kubenswrapper[4800]: I1125 17:11:51.786008 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:11:51 crc kubenswrapper[4800]: E1125 17:11:51.787066 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:12:06 crc kubenswrapper[4800]: I1125 17:12:06.785261 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:12:06 crc kubenswrapper[4800]: E1125 17:12:06.785977 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:12:18 crc kubenswrapper[4800]: I1125 17:12:18.786009 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:12:18 crc kubenswrapper[4800]: E1125 17:12:18.786899 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:12:30 crc kubenswrapper[4800]: I1125 17:12:30.785949 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:12:30 crc kubenswrapper[4800]: E1125 17:12:30.786921 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:12:41 crc kubenswrapper[4800]: I1125 17:12:41.785960 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:12:41 crc kubenswrapper[4800]: E1125 17:12:41.787055 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:12:53 crc kubenswrapper[4800]: I1125 17:12:53.786025 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:12:53 crc kubenswrapper[4800]: E1125 17:12:53.787793 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:13:05 crc kubenswrapper[4800]: I1125 17:13:05.785974 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:13:05 crc kubenswrapper[4800]: E1125 17:13:05.787233 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:13:17 crc kubenswrapper[4800]: I1125 17:13:17.785696 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:13:17 crc kubenswrapper[4800]: E1125 17:13:17.786558 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:13:30 crc kubenswrapper[4800]: I1125 17:13:30.786095 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:13:30 crc kubenswrapper[4800]: E1125 17:13:30.786778 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:13:45 crc kubenswrapper[4800]: I1125 17:13:45.786281 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a" Nov 25 17:13:46 crc kubenswrapper[4800]: I1125 17:13:46.123575 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"8e9e97e93a63f48db0bb25586ec236ba4713e3250e9bda953e77defe1f728f5b"} Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.151023 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"] Nov 25 17:15:00 crc kubenswrapper[4800]: E1125 17:15:00.152112 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="registry-server" Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152130 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="registry-server" Nov 25 17:15:00 crc kubenswrapper[4800]: E1125 17:15:00.152147 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-utilities" Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152155 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-utilities" Nov 25 17:15:00 crc kubenswrapper[4800]: E1125 17:15:00.152171 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-content" Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152179 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-content" Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152423 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="registry-server" Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.153164 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.151023 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"]
Nov 25 17:15:00 crc kubenswrapper[4800]: E1125 17:15:00.152112 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="registry-server"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152130 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="registry-server"
Nov 25 17:15:00 crc kubenswrapper[4800]: E1125 17:15:00.152147 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-utilities"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152155 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-utilities"
Nov 25 17:15:00 crc kubenswrapper[4800]: E1125 17:15:00.152171 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-content"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152179 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="extract-content"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.152423 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b3dd583-87db-4702-809e-3ee250ded195" containerName="registry-server"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.153164 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.155579 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.155719 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.163911 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"]
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.263636 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-secret-volume\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.263691 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4vxq\" (UniqueName: \"kubernetes.io/projected/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-kube-api-access-f4vxq\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.263776 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-config-volume\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.366702 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-secret-volume\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.366792 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4vxq\" (UniqueName: \"kubernetes.io/projected/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-kube-api-access-f4vxq\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.366965 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-config-volume\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.368144 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-config-volume\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.373454 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-secret-volume\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.383578 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4vxq\" (UniqueName: \"kubernetes.io/projected/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-kube-api-access-f4vxq\") pod \"collect-profiles-29401515-blrcr\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.478379 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:00 crc kubenswrapper[4800]: I1125 17:15:00.951092 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"]
Nov 25 17:15:01 crc kubenswrapper[4800]: I1125 17:15:01.902178 4800 generic.go:334] "Generic (PLEG): container finished" podID="f8c31fb7-bab0-4f45-910d-9c5612b0f83e" containerID="cc4a10c0c81792eb69432245bf7ccb5d799f6f4fc6693e6bb774bea230918ef8" exitCode=0
Nov 25 17:15:01 crc kubenswrapper[4800]: I1125 17:15:01.902412 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr" event={"ID":"f8c31fb7-bab0-4f45-910d-9c5612b0f83e","Type":"ContainerDied","Data":"cc4a10c0c81792eb69432245bf7ccb5d799f6f4fc6693e6bb774bea230918ef8"}
Nov 25 17:15:01 crc kubenswrapper[4800]: I1125 17:15:01.902771 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr" event={"ID":"f8c31fb7-bab0-4f45-910d-9c5612b0f83e","Type":"ContainerStarted","Data":"bf7cce4207814e2eefe79878fd7cc9275c6b54a8b75d79eeab7352ae19aa2441"}
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.407809 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.526524 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4vxq\" (UniqueName: \"kubernetes.io/projected/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-kube-api-access-f4vxq\") pod \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") "
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.526629 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-secret-volume\") pod \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") "
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.526817 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-config-volume\") pod \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\" (UID: \"f8c31fb7-bab0-4f45-910d-9c5612b0f83e\") "
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.527342 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-config-volume" (OuterVolumeSpecName: "config-volume") pod "f8c31fb7-bab0-4f45-910d-9c5612b0f83e" (UID: "f8c31fb7-bab0-4f45-910d-9c5612b0f83e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.527946 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.535020 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f8c31fb7-bab0-4f45-910d-9c5612b0f83e" (UID: "f8c31fb7-bab0-4f45-910d-9c5612b0f83e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.535694 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-kube-api-access-f4vxq" (OuterVolumeSpecName: "kube-api-access-f4vxq") pod "f8c31fb7-bab0-4f45-910d-9c5612b0f83e" (UID: "f8c31fb7-bab0-4f45-910d-9c5612b0f83e"). InnerVolumeSpecName "kube-api-access-f4vxq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.629899 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4vxq\" (UniqueName: \"kubernetes.io/projected/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-kube-api-access-f4vxq\") on node \"crc\" DevicePath \"\""
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.629935 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8c31fb7-bab0-4f45-910d-9c5612b0f83e-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.928940 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr" event={"ID":"f8c31fb7-bab0-4f45-910d-9c5612b0f83e","Type":"ContainerDied","Data":"bf7cce4207814e2eefe79878fd7cc9275c6b54a8b75d79eeab7352ae19aa2441"}
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.928987 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf7cce4207814e2eefe79878fd7cc9275c6b54a8b75d79eeab7352ae19aa2441"
Nov 25 17:15:03 crc kubenswrapper[4800]: I1125 17:15:03.929031 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"
Nov 25 17:15:04 crc kubenswrapper[4800]: I1125 17:15:04.485887 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg"]
Nov 25 17:15:04 crc kubenswrapper[4800]: I1125 17:15:04.513712 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401470-lnhmg"]
Nov 25 17:15:05 crc kubenswrapper[4800]: I1125 17:15:05.796530 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0e025a4-4be8-4e65-b0d4-f45a77bbad73" path="/var/lib/kubelet/pods/b0e025a4-4be8-4e65-b0d4-f45a77bbad73/volumes"
Nov 25 17:15:33 crc kubenswrapper[4800]: I1125 17:15:33.171925 4800 scope.go:117] "RemoveContainer" containerID="710eda65a0f3236b92b5fa57ecc1200a17c540392a0c527f596049207d65cab0"
Nov 25 17:16:12 crc kubenswrapper[4800]: I1125 17:16:12.640543 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:16:12 crc kubenswrapper[4800]: I1125 17:16:12.641321 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:16:42 crc kubenswrapper[4800]: I1125 17:16:42.643052 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:16:42 crc kubenswrapper[4800]: I1125 17:16:42.643599 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:17:12 crc kubenswrapper[4800]: I1125 17:17:12.640291 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:17:12 crc kubenswrapper[4800]: I1125 17:17:12.640973 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:17:12 crc kubenswrapper[4800]: I1125 17:17:12.641034 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 17:17:12 crc kubenswrapper[4800]: I1125 17:17:12.641976 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e9e97e93a63f48db0bb25586ec236ba4713e3250e9bda953e77defe1f728f5b"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 17:17:12 crc kubenswrapper[4800]: I1125 17:17:12.642071 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://8e9e97e93a63f48db0bb25586ec236ba4713e3250e9bda953e77defe1f728f5b" gracePeriod=600
Nov 25 17:17:13 crc kubenswrapper[4800]: I1125 17:17:13.247145 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="8e9e97e93a63f48db0bb25586ec236ba4713e3250e9bda953e77defe1f728f5b" exitCode=0
Nov 25 17:17:13 crc kubenswrapper[4800]: I1125 17:17:13.247233 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"8e9e97e93a63f48db0bb25586ec236ba4713e3250e9bda953e77defe1f728f5b"}
Nov 25 17:17:13 crc kubenswrapper[4800]: I1125 17:17:13.247451 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d"}
Nov 25 17:17:13 crc kubenswrapper[4800]: I1125 17:17:13.247474 4800 scope.go:117] "RemoveContainer" containerID="ea71787831791057aad8e2e55daccf1cc129958d17607f6af75ebcf481e6eb8a"
podUID="f8c31fb7-bab0-4f45-910d-9c5612b0f83e" containerName="collect-profiles" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.509261 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8c31fb7-bab0-4f45-910d-9c5612b0f83e" containerName="collect-profiles" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.510529 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.527287 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z7krd"] Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.592630 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcchq\" (UniqueName: \"kubernetes.io/projected/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-kube-api-access-kcchq\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.593068 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-utilities\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.593137 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-catalog-content\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.695143 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-utilities\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.695448 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-catalog-content\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.695609 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcchq\" (UniqueName: \"kubernetes.io/projected/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-kube-api-access-kcchq\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.695776 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-utilities\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.695816 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-catalog-content\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.717380 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcchq\" (UniqueName: \"kubernetes.io/projected/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-kube-api-access-kcchq\") pod \"redhat-operators-z7krd\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") " pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:24 crc kubenswrapper[4800]: I1125 17:17:24.871649 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:25 crc kubenswrapper[4800]: I1125 17:17:25.409547 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z7krd"] Nov 25 17:17:26 crc kubenswrapper[4800]: I1125 17:17:26.383106 4800 generic.go:334] "Generic (PLEG): container finished" podID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerID="168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496" exitCode=0 Nov 25 17:17:26 crc kubenswrapper[4800]: I1125 17:17:26.383390 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z7krd" event={"ID":"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7","Type":"ContainerDied","Data":"168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496"} Nov 25 17:17:26 crc kubenswrapper[4800]: I1125 17:17:26.383424 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z7krd" event={"ID":"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7","Type":"ContainerStarted","Data":"391b6ff7d8bf747c68443bf5d0f7482f32f7e852cbe568e000d200d938828252"} Nov 25 17:17:26 crc kubenswrapper[4800]: I1125 17:17:26.386464 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:17:28 crc kubenswrapper[4800]: I1125 17:17:28.401353 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z7krd" event={"ID":"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7","Type":"ContainerStarted","Data":"8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398"} Nov 25 17:17:28 crc kubenswrapper[4800]: E1125 17:17:28.597221 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e11a4d1_bc5a_4bab_aec3_8a97aba598f7.slice/crio-8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:17:30 crc kubenswrapper[4800]: I1125 17:17:30.425125 4800 generic.go:334] "Generic (PLEG): container finished" podID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerID="8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398" exitCode=0 Nov 25 17:17:30 crc kubenswrapper[4800]: I1125 17:17:30.425198 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z7krd" event={"ID":"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7","Type":"ContainerDied","Data":"8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398"} Nov 25 17:17:31 crc kubenswrapper[4800]: I1125 17:17:31.440228 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z7krd" 
event={"ID":"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7","Type":"ContainerStarted","Data":"98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b"} Nov 25 17:17:31 crc kubenswrapper[4800]: I1125 17:17:31.465813 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-z7krd" podStartSLOduration=3.001237294 podStartE2EDuration="7.465790637s" podCreationTimestamp="2025-11-25 17:17:24 +0000 UTC" firstStartedPulling="2025-11-25 17:17:26.386078475 +0000 UTC m=+7207.440486967" lastFinishedPulling="2025-11-25 17:17:30.850631828 +0000 UTC m=+7211.905040310" observedRunningTime="2025-11-25 17:17:31.459525176 +0000 UTC m=+7212.513933688" watchObservedRunningTime="2025-11-25 17:17:31.465790637 +0000 UTC m=+7212.520199119" Nov 25 17:17:34 crc kubenswrapper[4800]: I1125 17:17:34.872742 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:34 crc kubenswrapper[4800]: I1125 17:17:34.873119 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:35 crc kubenswrapper[4800]: I1125 17:17:35.932280 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-z7krd" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="registry-server" probeResult="failure" output=< Nov 25 17:17:35 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 17:17:35 crc kubenswrapper[4800]: > Nov 25 17:17:44 crc kubenswrapper[4800]: I1125 17:17:44.936313 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:44 crc kubenswrapper[4800]: I1125 17:17:44.987867 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-z7krd" Nov 25 17:17:45 crc kubenswrapper[4800]: I1125 17:17:45.184053 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z7krd"] Nov 25 17:17:46 crc kubenswrapper[4800]: I1125 17:17:46.591117 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-z7krd" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="registry-server" containerID="cri-o://98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b" gracePeriod=2 Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.122506 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 17:17:44 crc kubenswrapper[4800]: I1125 17:17:44.936313 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-z7krd"
Nov 25 17:17:44 crc kubenswrapper[4800]: I1125 17:17:44.987867 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-z7krd"
Nov 25 17:17:45 crc kubenswrapper[4800]: I1125 17:17:45.184053 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z7krd"]
Nov 25 17:17:46 crc kubenswrapper[4800]: I1125 17:17:46.591117 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-z7krd" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="registry-server" containerID="cri-o://98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b" gracePeriod=2
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.122506 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z7krd"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.270371 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcchq\" (UniqueName: \"kubernetes.io/projected/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-kube-api-access-kcchq\") pod \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") "
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.270432 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-utilities\") pod \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") "
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.270504 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-catalog-content\") pod \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\" (UID: \"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7\") "
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.272817 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-utilities" (OuterVolumeSpecName: "utilities") pod "6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" (UID: "6e11a4d1-bc5a-4bab-aec3-8a97aba598f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.275886 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-kube-api-access-kcchq" (OuterVolumeSpecName: "kube-api-access-kcchq") pod "6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" (UID: "6e11a4d1-bc5a-4bab-aec3-8a97aba598f7"). InnerVolumeSpecName "kube-api-access-kcchq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.373085 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcchq\" (UniqueName: \"kubernetes.io/projected/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-kube-api-access-kcchq\") on node \"crc\" DevicePath \"\""
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.373134 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.393555 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" (UID: "6e11a4d1-bc5a-4bab-aec3-8a97aba598f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.475896 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.604758 4800 generic.go:334] "Generic (PLEG): container finished" podID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerID="98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b" exitCode=0
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.604802 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z7krd" event={"ID":"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7","Type":"ContainerDied","Data":"98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b"}
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.604868 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z7krd"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.605077 4800 scope.go:117] "RemoveContainer" containerID="98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.605059 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z7krd" event={"ID":"6e11a4d1-bc5a-4bab-aec3-8a97aba598f7","Type":"ContainerDied","Data":"391b6ff7d8bf747c68443bf5d0f7482f32f7e852cbe568e000d200d938828252"}
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.646866 4800 scope.go:117] "RemoveContainer" containerID="8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.647303 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z7krd"]
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.664858 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-z7krd"]
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.674259 4800 scope.go:117] "RemoveContainer" containerID="168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.714354 4800 scope.go:117] "RemoveContainer" containerID="98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b"
Nov 25 17:17:47 crc kubenswrapper[4800]: E1125 17:17:47.714863 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b\": container with ID starting with 98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b not found: ID does not exist" containerID="98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.714892 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b"} err="failed to get container status \"98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b\": rpc error: code = NotFound desc = could not find container \"98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b\": container with ID starting with 98d5d97b10d8b8654c7b8b4241a80862fe59f782a8ab0d779701eac7e5a25b3b not found: ID does not exist"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.714913 4800 scope.go:117] "RemoveContainer" containerID="8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398"
Nov 25 17:17:47 crc kubenswrapper[4800]: E1125 17:17:47.716064 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398\": container with ID starting with 8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398 not found: ID does not exist" containerID="8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.716115 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398"} err="failed to get container status \"8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398\": rpc error: code = NotFound desc = could not find container \"8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398\": container with ID starting with 8959518320c829f09107c63eee8e7b61e1341485a18a2688c9a2243523bca398 not found: ID does not exist"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.716147 4800 scope.go:117] "RemoveContainer" containerID="168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496"
Nov 25 17:17:47 crc kubenswrapper[4800]: E1125 17:17:47.716479 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496\": container with ID starting with 168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496 not found: ID does not exist" containerID="168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.716536 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496"} err="failed to get container status \"168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496\": rpc error: code = NotFound desc = could not find container \"168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496\": container with ID starting with 168ca069111ad7307a88f1c892615e197d5d8e8808dca4418d84bab60a338496 not found: ID does not exist"
Nov 25 17:17:47 crc kubenswrapper[4800]: I1125 17:17:47.799223 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" path="/var/lib/kubelet/pods/6e11a4d1-bc5a-4bab-aec3-8a97aba598f7/volumes"
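The RemoveContainer / "ID does not exist" sequences above are kubelet retrying deletion of containers the runtime has already pruned; the NotFound errors are logged and then the cleanup proceeds as if the delete had succeeded. A small Go sketch of that idempotent-delete pattern, assuming a gRPC-style error carrying codes.NotFound; removeIfPresent and the fake client are hypothetical names for illustration:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIfPresent treats gRPC NotFound as "already gone" rather than as
// a failure, mirroring how the log moves past these errors.
func removeIfPresent(remove func(id string) error, id string) error {
	err := remove(id)
	if status.Code(err) == codes.NotFound {
		return nil // container already removed; nothing left to do
	}
	return err
}

func main() {
	// Simulate the runtime answering NotFound for an already-deleted ID.
	fake := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	if err := removeIfPresent(fake, "98d5d97b"); err != nil {
		fmt.Println("unexpected:", err)
	} else {
		fmt.Println("treated NotFound as success")
	}
}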
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.614655 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gr45p"]
Nov 25 17:17:59 crc kubenswrapper[4800]: E1125 17:17:59.615972 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="extract-utilities"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.615997 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="extract-utilities"
Nov 25 17:17:59 crc kubenswrapper[4800]: E1125 17:17:59.616031 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="extract-content"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.616045 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="extract-content"
Nov 25 17:17:59 crc kubenswrapper[4800]: E1125 17:17:59.616094 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="registry-server"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.616108 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="registry-server"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.616545 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e11a4d1-bc5a-4bab-aec3-8a97aba598f7" containerName="registry-server"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.619277 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.631281 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gr45p"]
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.655833 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-catalog-content\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.655932 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmdv4\" (UniqueName: \"kubernetes.io/projected/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-kube-api-access-gmdv4\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.656089 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-utilities\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.758074 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-utilities\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.758176 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-catalog-content\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.758243 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmdv4\" (UniqueName: \"kubernetes.io/projected/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-kube-api-access-gmdv4\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.758530 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-utilities\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.758763 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-catalog-content\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.782646 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmdv4\" (UniqueName: \"kubernetes.io/projected/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-kube-api-access-gmdv4\") pod \"community-operators-gr45p\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") " pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:17:59 crc kubenswrapper[4800]: I1125 17:17:59.960373 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:18:00 crc kubenswrapper[4800]: I1125 17:18:00.494606 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gr45p"]
Nov 25 17:18:00 crc kubenswrapper[4800]: I1125 17:18:00.746820 4800 generic.go:334] "Generic (PLEG): container finished" podID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerID="e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b" exitCode=0
Nov 25 17:18:00 crc kubenswrapper[4800]: I1125 17:18:00.747012 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gr45p" event={"ID":"a742187a-ed84-4c7b-8d0d-4d7238e6bafc","Type":"ContainerDied","Data":"e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b"}
Nov 25 17:18:00 crc kubenswrapper[4800]: I1125 17:18:00.747972 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gr45p" event={"ID":"a742187a-ed84-4c7b-8d0d-4d7238e6bafc","Type":"ContainerStarted","Data":"a28c8d6d9f5e56ed5dd0c7cd27aee1df95c60f4b194420e65e646ef4ba80240c"}
Nov 25 17:18:01 crc kubenswrapper[4800]: I1125 17:18:01.758333 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gr45p" event={"ID":"a742187a-ed84-4c7b-8d0d-4d7238e6bafc","Type":"ContainerStarted","Data":"76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d"}
Nov 25 17:18:02 crc kubenswrapper[4800]: I1125 17:18:02.774286 4800 generic.go:334] "Generic (PLEG): container finished" podID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerID="76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d" exitCode=0
Nov 25 17:18:02 crc kubenswrapper[4800]: I1125 17:18:02.774758 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gr45p" event={"ID":"a742187a-ed84-4c7b-8d0d-4d7238e6bafc","Type":"ContainerDied","Data":"76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d"}
Nov 25 17:18:03 crc kubenswrapper[4800]: I1125 17:18:03.799781 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gr45p" event={"ID":"a742187a-ed84-4c7b-8d0d-4d7238e6bafc","Type":"ContainerStarted","Data":"18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f"}
Nov 25 17:18:03 crc kubenswrapper[4800]: I1125 17:18:03.808265 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gr45p" podStartSLOduration=2.365108533 podStartE2EDuration="4.808233692s" podCreationTimestamp="2025-11-25 17:17:59 +0000 UTC" firstStartedPulling="2025-11-25 17:18:00.748426802 +0000 UTC m=+7241.802835284" lastFinishedPulling="2025-11-25 17:18:03.191551961 +0000 UTC m=+7244.245960443" observedRunningTime="2025-11-25 17:18:03.803075391 +0000 UTC m=+7244.857483883" watchObservedRunningTime="2025-11-25 17:18:03.808233692 +0000 UTC m=+7244.862642184"
Nov 25 17:18:09 crc kubenswrapper[4800]: I1125 17:18:09.961575 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:18:09 crc kubenswrapper[4800]: I1125 17:18:09.963079 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:18:10 crc kubenswrapper[4800]: I1125 17:18:10.052364 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:18:10 crc kubenswrapper[4800]: I1125 17:18:10.930179 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:18:10 crc kubenswrapper[4800]: I1125 17:18:10.995172 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gr45p"]
Nov 25 17:18:12 crc kubenswrapper[4800]: I1125 17:18:12.884032 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gr45p" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="registry-server" containerID="cri-o://18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f" gracePeriod=2
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.424511 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.553627 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmdv4\" (UniqueName: \"kubernetes.io/projected/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-kube-api-access-gmdv4\") pod \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") "
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.553732 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-utilities\") pod \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") "
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.553819 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-catalog-content\") pod \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\" (UID: \"a742187a-ed84-4c7b-8d0d-4d7238e6bafc\") "
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.554537 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-utilities" (OuterVolumeSpecName: "utilities") pod "a742187a-ed84-4c7b-8d0d-4d7238e6bafc" (UID: "a742187a-ed84-4c7b-8d0d-4d7238e6bafc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.559879 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-kube-api-access-gmdv4" (OuterVolumeSpecName: "kube-api-access-gmdv4") pod "a742187a-ed84-4c7b-8d0d-4d7238e6bafc" (UID: "a742187a-ed84-4c7b-8d0d-4d7238e6bafc"). InnerVolumeSpecName "kube-api-access-gmdv4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.612157 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a742187a-ed84-4c7b-8d0d-4d7238e6bafc" (UID: "a742187a-ed84-4c7b-8d0d-4d7238e6bafc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.657534 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.657641 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmdv4\" (UniqueName: \"kubernetes.io/projected/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-kube-api-access-gmdv4\") on node \"crc\" DevicePath \"\""
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.657669 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a742187a-ed84-4c7b-8d0d-4d7238e6bafc-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.896678 4800 generic.go:334] "Generic (PLEG): container finished" podID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerID="18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f" exitCode=0
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.896721 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gr45p" event={"ID":"a742187a-ed84-4c7b-8d0d-4d7238e6bafc","Type":"ContainerDied","Data":"18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f"}
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.896779 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gr45p" event={"ID":"a742187a-ed84-4c7b-8d0d-4d7238e6bafc","Type":"ContainerDied","Data":"a28c8d6d9f5e56ed5dd0c7cd27aee1df95c60f4b194420e65e646ef4ba80240c"}
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.896806 4800 scope.go:117] "RemoveContainer" containerID="18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f"
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.896737 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gr45p"
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.926092 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gr45p"]
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.937006 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gr45p"]
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.938159 4800 scope.go:117] "RemoveContainer" containerID="76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d"
Nov 25 17:18:13 crc kubenswrapper[4800]: I1125 17:18:13.965449 4800 scope.go:117] "RemoveContainer" containerID="e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b"
Nov 25 17:18:14 crc kubenswrapper[4800]: I1125 17:18:14.035197 4800 scope.go:117] "RemoveContainer" containerID="18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f"
Nov 25 17:18:14 crc kubenswrapper[4800]: E1125 17:18:14.035794 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f\": container with ID starting with 18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f not found: ID does not exist" containerID="18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f"
Nov 25 17:18:14 crc kubenswrapper[4800]: I1125 17:18:14.035871 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f"} err="failed to get container status \"18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f\": rpc error: code = NotFound desc = could not find container \"18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f\": container with ID starting with 18e397c9315beb9074e175319f2b166e4d0196d97d027fc31f28a57a13fc6e3f not found: ID does not exist"
Nov 25 17:18:14 crc kubenswrapper[4800]: I1125 17:18:14.035907 4800 scope.go:117] "RemoveContainer" containerID="76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d"
Nov 25 17:18:14 crc kubenswrapper[4800]: E1125 17:18:14.036407 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d\": container with ID starting with 76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d not found: ID does not exist" containerID="76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d"
Nov 25 17:18:14 crc kubenswrapper[4800]: I1125 17:18:14.036457 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d"} err="failed to get container status \"76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d\": rpc error: code = NotFound desc = could not find container \"76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d\": container with ID starting with 76aed8baa95d193ae266ba371f8a988c605a705fc1069cc108c37ae65b50524d not found: ID does not exist"
Nov 25 17:18:14 crc kubenswrapper[4800]: I1125 17:18:14.036484 4800 scope.go:117] "RemoveContainer" containerID="e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b"
Nov 25 17:18:14 crc kubenswrapper[4800]: E1125 17:18:14.036754 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b\": container with ID starting with e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b not found: ID does not exist" containerID="e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b"
Nov 25 17:18:14 crc kubenswrapper[4800]: I1125 17:18:14.036784 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b"} err="failed to get container status \"e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b\": rpc error: code = NotFound desc = could not find container \"e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b\": container with ID starting with e4afc6cf47cd77f3b866ac77cdcf3d1e3f52c973d6c13ca66543b572e40cc09b not found: ID does not exist"
Nov 25 17:18:15 crc kubenswrapper[4800]: I1125 17:18:15.799275 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" path="/var/lib/kubelet/pods/a742187a-ed84-4c7b-8d0d-4d7238e6bafc/volumes"
Nov 25 17:19:42 crc kubenswrapper[4800]: I1125 17:19:42.639951 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:19:42 crc kubenswrapper[4800]: I1125 17:19:42.640594 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.382038 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b8hsh"]
Nov 25 17:19:57 crc kubenswrapper[4800]: E1125 17:19:57.383122 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="extract-utilities"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.383144 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="extract-utilities"
Nov 25 17:19:57 crc kubenswrapper[4800]: E1125 17:19:57.383174 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="extract-content"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.383185 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="extract-content"
Nov 25 17:19:57 crc kubenswrapper[4800]: E1125 17:19:57.383209 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="registry-server"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.383219 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="registry-server"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.383570 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a742187a-ed84-4c7b-8d0d-4d7238e6bafc" containerName="registry-server"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.385775 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.393062 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8hsh"]
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.460530 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgclr\" (UniqueName: \"kubernetes.io/projected/270edf00-4663-44e2-895b-46a0b076274b-kube-api-access-dgclr\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.460687 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-utilities\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.460723 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-catalog-content\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.562389 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgclr\" (UniqueName: \"kubernetes.io/projected/270edf00-4663-44e2-895b-46a0b076274b-kube-api-access-dgclr\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.562463 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-utilities\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.562505 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-catalog-content\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.563226 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-catalog-content\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.563334 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-utilities\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.588694 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgclr\" (UniqueName: \"kubernetes.io/projected/270edf00-4663-44e2-895b-46a0b076274b-kube-api-access-dgclr\") pod \"redhat-marketplace-b8hsh\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:57 crc kubenswrapper[4800]: I1125 17:19:57.702867 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:19:58 crc kubenswrapper[4800]: I1125 17:19:58.247620 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8hsh"]
Nov 25 17:19:58 crc kubenswrapper[4800]: I1125 17:19:58.710130 4800 generic.go:334] "Generic (PLEG): container finished" podID="270edf00-4663-44e2-895b-46a0b076274b" containerID="679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05" exitCode=0
Nov 25 17:19:58 crc kubenswrapper[4800]: I1125 17:19:58.710243 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8hsh" event={"ID":"270edf00-4663-44e2-895b-46a0b076274b","Type":"ContainerDied","Data":"679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05"}
Nov 25 17:19:58 crc kubenswrapper[4800]: I1125 17:19:58.710418 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8hsh" event={"ID":"270edf00-4663-44e2-895b-46a0b076274b","Type":"ContainerStarted","Data":"bfbef0c77ad2571aae7d8854895599e7c7dd1022e8dd892a37b3faedbb306841"}
Nov 25 17:20:00 crc kubenswrapper[4800]: I1125 17:20:00.737656 4800 generic.go:334] "Generic (PLEG): container finished" podID="270edf00-4663-44e2-895b-46a0b076274b" containerID="27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588" exitCode=0
Nov 25 17:20:00 crc kubenswrapper[4800]: I1125 17:20:00.737793 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8hsh" event={"ID":"270edf00-4663-44e2-895b-46a0b076274b","Type":"ContainerDied","Data":"27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588"}
Nov 25 17:20:01 crc kubenswrapper[4800]: I1125 17:20:01.751133 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8hsh" event={"ID":"270edf00-4663-44e2-895b-46a0b076274b","Type":"ContainerStarted","Data":"56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a"}
Nov 25 17:20:01 crc kubenswrapper[4800]: I1125 17:20:01.786533 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b8hsh" podStartSLOduration=2.364742133 podStartE2EDuration="4.786507411s" podCreationTimestamp="2025-11-25 17:19:57 +0000 UTC" firstStartedPulling="2025-11-25 17:19:58.711986141 +0000 UTC m=+7359.766394623" lastFinishedPulling="2025-11-25 17:20:01.133751419 +0000 UTC m=+7362.188159901" observedRunningTime="2025-11-25 17:20:01.777572288 +0000 UTC m=+7362.831980800" watchObservedRunningTime="2025-11-25 17:20:01.786507411 +0000 UTC m=+7362.840915903"
Nov 25 17:20:07 crc kubenswrapper[4800]: I1125 17:20:07.703964 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:20:07 crc kubenswrapper[4800]: I1125 17:20:07.705083 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b8hsh"
Nov 25 17:20:07 crc
kubenswrapper[4800]: I1125 17:20:07.800474 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b8hsh" Nov 25 17:20:07 crc kubenswrapper[4800]: I1125 17:20:07.858780 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b8hsh" Nov 25 17:20:08 crc kubenswrapper[4800]: I1125 17:20:08.039402 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8hsh"] Nov 25 17:20:09 crc kubenswrapper[4800]: I1125 17:20:09.820133 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b8hsh" podUID="270edf00-4663-44e2-895b-46a0b076274b" containerName="registry-server" containerID="cri-o://56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a" gracePeriod=2 Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.295343 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8hsh" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.470365 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-utilities\") pod \"270edf00-4663-44e2-895b-46a0b076274b\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.470648 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-catalog-content\") pod \"270edf00-4663-44e2-895b-46a0b076274b\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.470688 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgclr\" (UniqueName: \"kubernetes.io/projected/270edf00-4663-44e2-895b-46a0b076274b-kube-api-access-dgclr\") pod \"270edf00-4663-44e2-895b-46a0b076274b\" (UID: \"270edf00-4663-44e2-895b-46a0b076274b\") " Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.471480 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-utilities" (OuterVolumeSpecName: "utilities") pod "270edf00-4663-44e2-895b-46a0b076274b" (UID: "270edf00-4663-44e2-895b-46a0b076274b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.472141 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.476464 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/270edf00-4663-44e2-895b-46a0b076274b-kube-api-access-dgclr" (OuterVolumeSpecName: "kube-api-access-dgclr") pod "270edf00-4663-44e2-895b-46a0b076274b" (UID: "270edf00-4663-44e2-895b-46a0b076274b"). InnerVolumeSpecName "kube-api-access-dgclr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.491509 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "270edf00-4663-44e2-895b-46a0b076274b" (UID: "270edf00-4663-44e2-895b-46a0b076274b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.573541 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270edf00-4663-44e2-895b-46a0b076274b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.573575 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgclr\" (UniqueName: \"kubernetes.io/projected/270edf00-4663-44e2-895b-46a0b076274b-kube-api-access-dgclr\") on node \"crc\" DevicePath \"\"" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.830404 4800 generic.go:334] "Generic (PLEG): container finished" podID="270edf00-4663-44e2-895b-46a0b076274b" containerID="56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a" exitCode=0 Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.830450 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8hsh" event={"ID":"270edf00-4663-44e2-895b-46a0b076274b","Type":"ContainerDied","Data":"56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a"} Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.830480 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8hsh" event={"ID":"270edf00-4663-44e2-895b-46a0b076274b","Type":"ContainerDied","Data":"bfbef0c77ad2571aae7d8854895599e7c7dd1022e8dd892a37b3faedbb306841"} Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.830504 4800 scope.go:117] "RemoveContainer" containerID="56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.830650 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8hsh" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.886875 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8hsh"] Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.899765 4800 scope.go:117] "RemoveContainer" containerID="27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.901531 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8hsh"] Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.924382 4800 scope.go:117] "RemoveContainer" containerID="679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.980364 4800 scope.go:117] "RemoveContainer" containerID="56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a" Nov 25 17:20:10 crc kubenswrapper[4800]: E1125 17:20:10.982064 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a\": container with ID starting with 56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a not found: ID does not exist" containerID="56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.982093 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a"} err="failed to get container status \"56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a\": rpc error: code = NotFound desc = could not find container \"56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a\": container with ID starting with 56ecba4c8fa9b4b7b3e7f229aa411be6ca1f8d15ec37ad589e4e744cf36b117a not found: ID does not exist" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.982112 4800 scope.go:117] "RemoveContainer" containerID="27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588" Nov 25 17:20:10 crc kubenswrapper[4800]: E1125 17:20:10.982379 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588\": container with ID starting with 27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588 not found: ID does not exist" containerID="27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.982495 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588"} err="failed to get container status \"27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588\": rpc error: code = NotFound desc = could not find container \"27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588\": container with ID starting with 27b634f44364a6bf485f10d38174284a1bbe9b3ce22d67dd1a7b789e80da9588 not found: ID does not exist" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.982614 4800 scope.go:117] "RemoveContainer" containerID="679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05" Nov 25 17:20:10 crc kubenswrapper[4800]: E1125 17:20:10.983132 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05\": container with ID starting with 679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05 not found: ID does not exist" containerID="679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05" Nov 25 17:20:10 crc kubenswrapper[4800]: I1125 17:20:10.983241 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05"} err="failed to get container status \"679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05\": rpc error: code = NotFound desc = could not find container \"679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05\": container with ID starting with 679abbd2eeceba7180ea6799b454f8593169c283c92534c9f6fbbd9d4b375a05 not found: ID does not exist" Nov 25 17:20:11 crc kubenswrapper[4800]: I1125 17:20:11.796393 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="270edf00-4663-44e2-895b-46a0b076274b" path="/var/lib/kubelet/pods/270edf00-4663-44e2-895b-46a0b076274b/volumes" Nov 25 17:20:12 crc kubenswrapper[4800]: I1125 17:20:12.640246 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:20:12 crc kubenswrapper[4800]: I1125 17:20:12.640574 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:20:42 crc kubenswrapper[4800]: I1125 17:20:42.640019 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:20:42 crc kubenswrapper[4800]: I1125 17:20:42.640695 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:20:42 crc kubenswrapper[4800]: I1125 17:20:42.640749 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 17:20:42 crc kubenswrapper[4800]: I1125 17:20:42.641671 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:20:42 crc kubenswrapper[4800]: I1125 17:20:42.641744 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" gracePeriod=600 Nov 25 17:20:42 crc kubenswrapper[4800]: E1125 17:20:42.769398 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:20:43 crc kubenswrapper[4800]: I1125 17:20:43.204230 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" exitCode=0 Nov 25 17:20:43 crc kubenswrapper[4800]: I1125 17:20:43.204356 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d"} Nov 25 17:20:43 crc kubenswrapper[4800]: I1125 17:20:43.204935 4800 scope.go:117] "RemoveContainer" containerID="8e9e97e93a63f48db0bb25586ec236ba4713e3250e9bda953e77defe1f728f5b" Nov 25 17:20:43 crc kubenswrapper[4800]: I1125 17:20:43.206306 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:20:43 crc kubenswrapper[4800]: E1125 17:20:43.206960 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:20:55 crc kubenswrapper[4800]: I1125 17:20:55.785534 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:20:55 crc kubenswrapper[4800]: E1125 17:20:55.786180 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.558079 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gmhfx"] Nov 25 17:21:00 crc kubenswrapper[4800]: E1125 17:21:00.560337 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270edf00-4663-44e2-895b-46a0b076274b" containerName="registry-server" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.560452 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="270edf00-4663-44e2-895b-46a0b076274b" containerName="registry-server" Nov 25 17:21:00 crc kubenswrapper[4800]: E1125 17:21:00.560592 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270edf00-4663-44e2-895b-46a0b076274b" 
containerName="extract-content" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.560693 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="270edf00-4663-44e2-895b-46a0b076274b" containerName="extract-content" Nov 25 17:21:00 crc kubenswrapper[4800]: E1125 17:21:00.560788 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270edf00-4663-44e2-895b-46a0b076274b" containerName="extract-utilities" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.560911 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="270edf00-4663-44e2-895b-46a0b076274b" containerName="extract-utilities" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.561276 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="270edf00-4663-44e2-895b-46a0b076274b" containerName="registry-server" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.563391 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gmhfx"] Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.563580 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.704409 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4pfz\" (UniqueName: \"kubernetes.io/projected/7d6d43f2-0aa9-4c10-a247-659f15e730f3-kube-api-access-c4pfz\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.704762 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-utilities\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.704792 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-catalog-content\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.806433 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4pfz\" (UniqueName: \"kubernetes.io/projected/7d6d43f2-0aa9-4c10-a247-659f15e730f3-kube-api-access-c4pfz\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.806519 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-utilities\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.806541 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-catalog-content\") pod \"certified-operators-gmhfx\" (UID: 
\"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.807082 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-catalog-content\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.807324 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-utilities\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.829557 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4pfz\" (UniqueName: \"kubernetes.io/projected/7d6d43f2-0aa9-4c10-a247-659f15e730f3-kube-api-access-c4pfz\") pod \"certified-operators-gmhfx\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:00 crc kubenswrapper[4800]: I1125 17:21:00.891621 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:01 crc kubenswrapper[4800]: I1125 17:21:01.423682 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gmhfx"] Nov 25 17:21:02 crc kubenswrapper[4800]: I1125 17:21:02.395286 4800 generic.go:334] "Generic (PLEG): container finished" podID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerID="6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3" exitCode=0 Nov 25 17:21:02 crc kubenswrapper[4800]: I1125 17:21:02.395565 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmhfx" event={"ID":"7d6d43f2-0aa9-4c10-a247-659f15e730f3","Type":"ContainerDied","Data":"6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3"} Nov 25 17:21:02 crc kubenswrapper[4800]: I1125 17:21:02.395592 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmhfx" event={"ID":"7d6d43f2-0aa9-4c10-a247-659f15e730f3","Type":"ContainerStarted","Data":"db2bb455133fc5d5e115879be2a4c15c005d634b60638af9080b75bf47af772a"} Nov 25 17:21:03 crc kubenswrapper[4800]: I1125 17:21:03.406211 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmhfx" event={"ID":"7d6d43f2-0aa9-4c10-a247-659f15e730f3","Type":"ContainerStarted","Data":"5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0"} Nov 25 17:21:04 crc kubenswrapper[4800]: E1125 17:21:04.112727 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d6d43f2_0aa9_4c10_a247_659f15e730f3.slice/crio-5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d6d43f2_0aa9_4c10_a247_659f15e730f3.slice/crio-conmon-5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:21:04 crc 
kubenswrapper[4800]: I1125 17:21:04.419248 4800 generic.go:334] "Generic (PLEG): container finished" podID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerID="5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0" exitCode=0 Nov 25 17:21:04 crc kubenswrapper[4800]: I1125 17:21:04.419338 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmhfx" event={"ID":"7d6d43f2-0aa9-4c10-a247-659f15e730f3","Type":"ContainerDied","Data":"5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0"} Nov 25 17:21:05 crc kubenswrapper[4800]: I1125 17:21:05.431429 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmhfx" event={"ID":"7d6d43f2-0aa9-4c10-a247-659f15e730f3","Type":"ContainerStarted","Data":"7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046"} Nov 25 17:21:05 crc kubenswrapper[4800]: I1125 17:21:05.454514 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gmhfx" podStartSLOduration=3.026535434 podStartE2EDuration="5.45449418s" podCreationTimestamp="2025-11-25 17:21:00 +0000 UTC" firstStartedPulling="2025-11-25 17:21:02.39720145 +0000 UTC m=+7423.451609932" lastFinishedPulling="2025-11-25 17:21:04.825160206 +0000 UTC m=+7425.879568678" observedRunningTime="2025-11-25 17:21:05.448798455 +0000 UTC m=+7426.503206947" watchObservedRunningTime="2025-11-25 17:21:05.45449418 +0000 UTC m=+7426.508902672" Nov 25 17:21:08 crc kubenswrapper[4800]: I1125 17:21:08.785598 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:21:08 crc kubenswrapper[4800]: E1125 17:21:08.786530 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:21:10 crc kubenswrapper[4800]: I1125 17:21:10.891905 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:10 crc kubenswrapper[4800]: I1125 17:21:10.893208 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:10 crc kubenswrapper[4800]: I1125 17:21:10.953524 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:11 crc kubenswrapper[4800]: I1125 17:21:11.543708 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:11 crc kubenswrapper[4800]: I1125 17:21:11.594490 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gmhfx"] Nov 25 17:21:13 crc kubenswrapper[4800]: I1125 17:21:13.523196 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gmhfx" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="registry-server" containerID="cri-o://7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046" gracePeriod=2 Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.122982 
4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.302517 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-utilities\") pod \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.302758 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4pfz\" (UniqueName: \"kubernetes.io/projected/7d6d43f2-0aa9-4c10-a247-659f15e730f3-kube-api-access-c4pfz\") pod \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.302904 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-catalog-content\") pod \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\" (UID: \"7d6d43f2-0aa9-4c10-a247-659f15e730f3\") " Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.303936 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-utilities" (OuterVolumeSpecName: "utilities") pod "7d6d43f2-0aa9-4c10-a247-659f15e730f3" (UID: "7d6d43f2-0aa9-4c10-a247-659f15e730f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.309904 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d6d43f2-0aa9-4c10-a247-659f15e730f3-kube-api-access-c4pfz" (OuterVolumeSpecName: "kube-api-access-c4pfz") pod "7d6d43f2-0aa9-4c10-a247-659f15e730f3" (UID: "7d6d43f2-0aa9-4c10-a247-659f15e730f3"). InnerVolumeSpecName "kube-api-access-c4pfz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.406915 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.406976 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4pfz\" (UniqueName: \"kubernetes.io/projected/7d6d43f2-0aa9-4c10-a247-659f15e730f3-kube-api-access-c4pfz\") on node \"crc\" DevicePath \"\"" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.535695 4800 generic.go:334] "Generic (PLEG): container finished" podID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerID="7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046" exitCode=0 Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.535742 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmhfx" event={"ID":"7d6d43f2-0aa9-4c10-a247-659f15e730f3","Type":"ContainerDied","Data":"7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046"} Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.535769 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmhfx" event={"ID":"7d6d43f2-0aa9-4c10-a247-659f15e730f3","Type":"ContainerDied","Data":"db2bb455133fc5d5e115879be2a4c15c005d634b60638af9080b75bf47af772a"} Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.535787 4800 scope.go:117] "RemoveContainer" containerID="7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.535913 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmhfx" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.564125 4800 scope.go:117] "RemoveContainer" containerID="5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.586187 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7d6d43f2-0aa9-4c10-a247-659f15e730f3" (UID: "7d6d43f2-0aa9-4c10-a247-659f15e730f3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.593976 4800 scope.go:117] "RemoveContainer" containerID="6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.612306 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6d43f2-0aa9-4c10-a247-659f15e730f3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.671104 4800 scope.go:117] "RemoveContainer" containerID="7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046" Nov 25 17:21:14 crc kubenswrapper[4800]: E1125 17:21:14.671775 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046\": container with ID starting with 7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046 not found: ID does not exist" containerID="7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.671870 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046"} err="failed to get container status \"7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046\": rpc error: code = NotFound desc = could not find container \"7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046\": container with ID starting with 7c00155a506c1314f65a9eeee315563e20ea5066376e2a7bd2dcc2c8272e8046 not found: ID does not exist" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.671917 4800 scope.go:117] "RemoveContainer" containerID="5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0" Nov 25 17:21:14 crc kubenswrapper[4800]: E1125 17:21:14.672392 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0\": container with ID starting with 5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0 not found: ID does not exist" containerID="5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.672433 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0"} err="failed to get container status \"5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0\": rpc error: code = NotFound desc = could not find container \"5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0\": container with ID starting with 5fc42d476d3e706bc234c0878d5de3bd61ccaac982e7f0e9e4cc507c37e9abb0 not found: ID does not exist" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.672458 4800 scope.go:117] "RemoveContainer" containerID="6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3" Nov 25 17:21:14 crc kubenswrapper[4800]: E1125 17:21:14.672999 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3\": container with ID starting with 6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3 not found: ID does not exist" 
containerID="6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.673066 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3"} err="failed to get container status \"6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3\": rpc error: code = NotFound desc = could not find container \"6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3\": container with ID starting with 6d4be339a8a0fc7d1c82bf1c63a822427ad6657f0d5f78265c8c9826293e52e3 not found: ID does not exist" Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.883877 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gmhfx"] Nov 25 17:21:14 crc kubenswrapper[4800]: I1125 17:21:14.899289 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gmhfx"] Nov 25 17:21:15 crc kubenswrapper[4800]: I1125 17:21:15.810350 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" path="/var/lib/kubelet/pods/7d6d43f2-0aa9-4c10-a247-659f15e730f3/volumes" Nov 25 17:21:21 crc kubenswrapper[4800]: I1125 17:21:21.786500 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:21:21 crc kubenswrapper[4800]: E1125 17:21:21.787584 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:21:32 crc kubenswrapper[4800]: I1125 17:21:32.786069 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:21:32 crc kubenswrapper[4800]: E1125 17:21:32.787483 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:21:43 crc kubenswrapper[4800]: I1125 17:21:43.786263 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:21:43 crc kubenswrapper[4800]: E1125 17:21:43.787327 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:21:57 crc kubenswrapper[4800]: I1125 17:21:57.785922 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:21:57 crc kubenswrapper[4800]: E1125 17:21:57.786780 4800 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:22:11 crc kubenswrapper[4800]: I1125 17:22:11.785809 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:22:11 crc kubenswrapper[4800]: E1125 17:22:11.786442 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:22:24 crc kubenswrapper[4800]: I1125 17:22:24.785776 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:22:24 crc kubenswrapper[4800]: E1125 17:22:24.786545 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:22:37 crc kubenswrapper[4800]: I1125 17:22:37.785588 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:22:37 crc kubenswrapper[4800]: E1125 17:22:37.786608 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:22:52 crc kubenswrapper[4800]: I1125 17:22:52.785725 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:22:52 crc kubenswrapper[4800]: E1125 17:22:52.786501 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:23:05 crc kubenswrapper[4800]: I1125 17:23:05.786027 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:23:05 crc kubenswrapper[4800]: E1125 17:23:05.786867 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:23:17 crc kubenswrapper[4800]: I1125 17:23:17.786089 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:23:17 crc kubenswrapper[4800]: E1125 17:23:17.786977 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:23:28 crc kubenswrapper[4800]: I1125 17:23:28.786244 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:23:28 crc kubenswrapper[4800]: E1125 17:23:28.787570 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:23:40 crc kubenswrapper[4800]: I1125 17:23:40.786668 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:23:40 crc kubenswrapper[4800]: E1125 17:23:40.787622 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:23:55 crc kubenswrapper[4800]: I1125 17:23:55.785338 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:23:55 crc kubenswrapper[4800]: E1125 17:23:55.786452 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:24:10 crc kubenswrapper[4800]: I1125 17:24:10.786570 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:24:10 crc kubenswrapper[4800]: E1125 17:24:10.787478 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:24:24 crc kubenswrapper[4800]: I1125 17:24:24.785780 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:24:24 crc kubenswrapper[4800]: E1125 17:24:24.786612 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:24:39 crc kubenswrapper[4800]: I1125 17:24:39.794607 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:24:39 crc kubenswrapper[4800]: E1125 17:24:39.796974 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:24:51 crc kubenswrapper[4800]: I1125 17:24:51.786505 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:24:51 crc kubenswrapper[4800]: E1125 17:24:51.787941 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:25:04 crc kubenswrapper[4800]: I1125 17:25:04.785804 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:25:04 crc kubenswrapper[4800]: E1125 17:25:04.786654 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:25:17 crc kubenswrapper[4800]: I1125 17:25:17.785892 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:25:17 crc kubenswrapper[4800]: E1125 17:25:17.787159 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:25:32 crc kubenswrapper[4800]: I1125 17:25:32.786458 4800 scope.go:117] "RemoveContainer" 
containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:25:32 crc kubenswrapper[4800]: E1125 17:25:32.787966 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:25:46 crc kubenswrapper[4800]: I1125 17:25:46.790732 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:25:47 crc kubenswrapper[4800]: I1125 17:25:47.513303 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"8f44cf5110cad2c84d575cff5470f84ba7a97098df2e62b4b9d02fa05bcea5e6"} Nov 25 17:28:12 crc kubenswrapper[4800]: I1125 17:28:12.640148 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:28:12 crc kubenswrapper[4800]: I1125 17:28:12.640924 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.391560 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b27vm"] Nov 25 17:28:22 crc kubenswrapper[4800]: E1125 17:28:22.392617 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="extract-utilities" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.392636 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="extract-utilities" Nov 25 17:28:22 crc kubenswrapper[4800]: E1125 17:28:22.392671 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="extract-content" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.392679 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="extract-content" Nov 25 17:28:22 crc kubenswrapper[4800]: E1125 17:28:22.392704 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="registry-server" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.392714 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="registry-server" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.393047 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d6d43f2-0aa9-4c10-a247-659f15e730f3" containerName="registry-server" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.394910 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.410067 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b27vm"] Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.537389 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-utilities\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.537457 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd4fj\" (UniqueName: \"kubernetes.io/projected/92175acb-17bf-4bb8-b4cd-b1097f116691-kube-api-access-rd4fj\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.537522 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-catalog-content\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.639474 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-utilities\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.639526 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd4fj\" (UniqueName: \"kubernetes.io/projected/92175acb-17bf-4bb8-b4cd-b1097f116691-kube-api-access-rd4fj\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.639562 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-catalog-content\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.640146 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-catalog-content\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.640361 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-utilities\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.660699 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rd4fj\" (UniqueName: \"kubernetes.io/projected/92175acb-17bf-4bb8-b4cd-b1097f116691-kube-api-access-rd4fj\") pod \"community-operators-b27vm\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:22 crc kubenswrapper[4800]: I1125 17:28:22.717313 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:23 crc kubenswrapper[4800]: I1125 17:28:23.225561 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b27vm"] Nov 25 17:28:24 crc kubenswrapper[4800]: I1125 17:28:24.180576 4800 generic.go:334] "Generic (PLEG): container finished" podID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerID="4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b" exitCode=0 Nov 25 17:28:24 crc kubenswrapper[4800]: I1125 17:28:24.180632 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b27vm" event={"ID":"92175acb-17bf-4bb8-b4cd-b1097f116691","Type":"ContainerDied","Data":"4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b"} Nov 25 17:28:24 crc kubenswrapper[4800]: I1125 17:28:24.180663 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b27vm" event={"ID":"92175acb-17bf-4bb8-b4cd-b1097f116691","Type":"ContainerStarted","Data":"85b934ecd8324a2396e10444cf482a224ad9ca7aefd57a463728ab7af3f7090f"} Nov 25 17:28:24 crc kubenswrapper[4800]: I1125 17:28:24.184483 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.202095 4800 generic.go:334] "Generic (PLEG): container finished" podID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerID="824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee" exitCode=0 Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.202223 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b27vm" event={"ID":"92175acb-17bf-4bb8-b4cd-b1097f116691","Type":"ContainerDied","Data":"824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee"} Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.568037 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b5nms"] Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.570960 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.578132 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5nms"] Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.727254 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-catalog-content\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.727370 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q6mm\" (UniqueName: \"kubernetes.io/projected/a13277de-0659-484a-9129-68147914939f-kube-api-access-6q6mm\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.727437 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-utilities\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.828902 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q6mm\" (UniqueName: \"kubernetes.io/projected/a13277de-0659-484a-9129-68147914939f-kube-api-access-6q6mm\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.829335 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-utilities\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.829511 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-catalog-content\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.830137 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-utilities\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.830665 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-catalog-content\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.852300 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6q6mm\" (UniqueName: \"kubernetes.io/projected/a13277de-0659-484a-9129-68147914939f-kube-api-access-6q6mm\") pod \"redhat-operators-b5nms\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:26 crc kubenswrapper[4800]: I1125 17:28:26.895857 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:27 crc kubenswrapper[4800]: I1125 17:28:27.216557 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b27vm" event={"ID":"92175acb-17bf-4bb8-b4cd-b1097f116691","Type":"ContainerStarted","Data":"85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724"} Nov 25 17:28:27 crc kubenswrapper[4800]: I1125 17:28:27.237591 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b27vm" podStartSLOduration=2.788042512 podStartE2EDuration="5.237574655s" podCreationTimestamp="2025-11-25 17:28:22 +0000 UTC" firstStartedPulling="2025-11-25 17:28:24.184090698 +0000 UTC m=+7865.238499200" lastFinishedPulling="2025-11-25 17:28:26.633622871 +0000 UTC m=+7867.688031343" observedRunningTime="2025-11-25 17:28:27.236270619 +0000 UTC m=+7868.290679121" watchObservedRunningTime="2025-11-25 17:28:27.237574655 +0000 UTC m=+7868.291983137" Nov 25 17:28:27 crc kubenswrapper[4800]: I1125 17:28:27.410262 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5nms"] Nov 25 17:28:27 crc kubenswrapper[4800]: W1125 17:28:27.419975 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda13277de_0659_484a_9129_68147914939f.slice/crio-6062e7d61d91d5a4f6bc11041eb4fc9d47d00f4ad11ee6bdb229f30b1ea7a470 WatchSource:0}: Error finding container 6062e7d61d91d5a4f6bc11041eb4fc9d47d00f4ad11ee6bdb229f30b1ea7a470: Status 404 returned error can't find the container with id 6062e7d61d91d5a4f6bc11041eb4fc9d47d00f4ad11ee6bdb229f30b1ea7a470 Nov 25 17:28:28 crc kubenswrapper[4800]: I1125 17:28:28.226417 4800 generic.go:334] "Generic (PLEG): container finished" podID="a13277de-0659-484a-9129-68147914939f" containerID="b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b" exitCode=0 Nov 25 17:28:28 crc kubenswrapper[4800]: I1125 17:28:28.226490 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5nms" event={"ID":"a13277de-0659-484a-9129-68147914939f","Type":"ContainerDied","Data":"b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b"} Nov 25 17:28:28 crc kubenswrapper[4800]: I1125 17:28:28.226777 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5nms" event={"ID":"a13277de-0659-484a-9129-68147914939f","Type":"ContainerStarted","Data":"6062e7d61d91d5a4f6bc11041eb4fc9d47d00f4ad11ee6bdb229f30b1ea7a470"} Nov 25 17:28:30 crc kubenswrapper[4800]: I1125 17:28:30.268991 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5nms" event={"ID":"a13277de-0659-484a-9129-68147914939f","Type":"ContainerStarted","Data":"47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a"} Nov 25 17:28:31 crc kubenswrapper[4800]: I1125 17:28:31.280735 4800 generic.go:334] "Generic (PLEG): container finished" podID="a13277de-0659-484a-9129-68147914939f" 
containerID="47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a" exitCode=0 Nov 25 17:28:31 crc kubenswrapper[4800]: I1125 17:28:31.280989 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5nms" event={"ID":"a13277de-0659-484a-9129-68147914939f","Type":"ContainerDied","Data":"47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a"} Nov 25 17:28:32 crc kubenswrapper[4800]: I1125 17:28:32.295191 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5nms" event={"ID":"a13277de-0659-484a-9129-68147914939f","Type":"ContainerStarted","Data":"ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3"} Nov 25 17:28:32 crc kubenswrapper[4800]: I1125 17:28:32.336318 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b5nms" podStartSLOduration=2.879080502 podStartE2EDuration="6.336294316s" podCreationTimestamp="2025-11-25 17:28:26 +0000 UTC" firstStartedPulling="2025-11-25 17:28:28.228059007 +0000 UTC m=+7869.282467489" lastFinishedPulling="2025-11-25 17:28:31.685272781 +0000 UTC m=+7872.739681303" observedRunningTime="2025-11-25 17:28:32.327584099 +0000 UTC m=+7873.381992581" watchObservedRunningTime="2025-11-25 17:28:32.336294316 +0000 UTC m=+7873.390702798" Nov 25 17:28:32 crc kubenswrapper[4800]: I1125 17:28:32.719566 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:32 crc kubenswrapper[4800]: I1125 17:28:32.719670 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:32 crc kubenswrapper[4800]: I1125 17:28:32.802068 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:33 crc kubenswrapper[4800]: I1125 17:28:33.371919 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:34 crc kubenswrapper[4800]: I1125 17:28:34.974568 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b27vm"] Nov 25 17:28:35 crc kubenswrapper[4800]: I1125 17:28:35.332776 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b27vm" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="registry-server" containerID="cri-o://85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724" gracePeriod=2 Nov 25 17:28:35 crc kubenswrapper[4800]: E1125 17:28:35.783073 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92175acb_17bf_4bb8_b4cd_b1097f116691.slice/crio-conmon-85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.060344 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.234607 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-catalog-content\") pod \"92175acb-17bf-4bb8-b4cd-b1097f116691\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.234767 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-utilities\") pod \"92175acb-17bf-4bb8-b4cd-b1097f116691\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.234906 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rd4fj\" (UniqueName: \"kubernetes.io/projected/92175acb-17bf-4bb8-b4cd-b1097f116691-kube-api-access-rd4fj\") pod \"92175acb-17bf-4bb8-b4cd-b1097f116691\" (UID: \"92175acb-17bf-4bb8-b4cd-b1097f116691\") " Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.236196 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-utilities" (OuterVolumeSpecName: "utilities") pod "92175acb-17bf-4bb8-b4cd-b1097f116691" (UID: "92175acb-17bf-4bb8-b4cd-b1097f116691"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.240145 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92175acb-17bf-4bb8-b4cd-b1097f116691-kube-api-access-rd4fj" (OuterVolumeSpecName: "kube-api-access-rd4fj") pod "92175acb-17bf-4bb8-b4cd-b1097f116691" (UID: "92175acb-17bf-4bb8-b4cd-b1097f116691"). InnerVolumeSpecName "kube-api-access-rd4fj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.336974 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rd4fj\" (UniqueName: \"kubernetes.io/projected/92175acb-17bf-4bb8-b4cd-b1097f116691-kube-api-access-rd4fj\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.337006 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.345200 4800 generic.go:334] "Generic (PLEG): container finished" podID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerID="85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724" exitCode=0 Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.345248 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b27vm" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.345259 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b27vm" event={"ID":"92175acb-17bf-4bb8-b4cd-b1097f116691","Type":"ContainerDied","Data":"85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724"} Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.345310 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b27vm" event={"ID":"92175acb-17bf-4bb8-b4cd-b1097f116691","Type":"ContainerDied","Data":"85b934ecd8324a2396e10444cf482a224ad9ca7aefd57a463728ab7af3f7090f"} Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.345344 4800 scope.go:117] "RemoveContainer" containerID="85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.387968 4800 scope.go:117] "RemoveContainer" containerID="824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.413856 4800 scope.go:117] "RemoveContainer" containerID="4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.460764 4800 scope.go:117] "RemoveContainer" containerID="85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724" Nov 25 17:28:36 crc kubenswrapper[4800]: E1125 17:28:36.461312 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724\": container with ID starting with 85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724 not found: ID does not exist" containerID="85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.461354 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724"} err="failed to get container status \"85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724\": rpc error: code = NotFound desc = could not find container \"85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724\": container with ID starting with 85e0d0d9107e18ce4de2e502a3a1f8f1abf7eea8a703094051331039bf159724 not found: ID does not exist" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.461381 4800 scope.go:117] "RemoveContainer" containerID="824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee" Nov 25 17:28:36 crc kubenswrapper[4800]: E1125 17:28:36.462278 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee\": container with ID starting with 824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee not found: ID does not exist" containerID="824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.462309 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee"} err="failed to get container status \"824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee\": rpc error: code = NotFound desc = could not find container 
\"824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee\": container with ID starting with 824b02a02ed737093935069e6ad693f3cbe6f352f0a70ca42acc7d983dfad5ee not found: ID does not exist" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.462329 4800 scope.go:117] "RemoveContainer" containerID="4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b" Nov 25 17:28:36 crc kubenswrapper[4800]: E1125 17:28:36.462733 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b\": container with ID starting with 4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b not found: ID does not exist" containerID="4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.462757 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b"} err="failed to get container status \"4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b\": rpc error: code = NotFound desc = could not find container \"4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b\": container with ID starting with 4735f19baff3769c875b1f14dba493b88822e731a7bf365dd58860966e49ee1b not found: ID does not exist" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.572214 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92175acb-17bf-4bb8-b4cd-b1097f116691" (UID: "92175acb-17bf-4bb8-b4cd-b1097f116691"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.642298 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92175acb-17bf-4bb8-b4cd-b1097f116691-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.716122 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b27vm"] Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.726815 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b27vm"] Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.896320 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:36 crc kubenswrapper[4800]: I1125 17:28:36.897150 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:37 crc kubenswrapper[4800]: I1125 17:28:37.802684 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" path="/var/lib/kubelet/pods/92175acb-17bf-4bb8-b4cd-b1097f116691/volumes" Nov 25 17:28:37 crc kubenswrapper[4800]: I1125 17:28:37.948889 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5nms" podUID="a13277de-0659-484a-9129-68147914939f" containerName="registry-server" probeResult="failure" output=< Nov 25 17:28:37 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 17:28:37 crc kubenswrapper[4800]: > Nov 25 17:28:42 crc kubenswrapper[4800]: I1125 17:28:42.639725 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:28:42 crc kubenswrapper[4800]: I1125 17:28:42.640445 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:28:46 crc kubenswrapper[4800]: I1125 17:28:46.984544 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:47 crc kubenswrapper[4800]: I1125 17:28:47.061502 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:47 crc kubenswrapper[4800]: I1125 17:28:47.238984 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5nms"] Nov 25 17:28:48 crc kubenswrapper[4800]: I1125 17:28:48.488226 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b5nms" podUID="a13277de-0659-484a-9129-68147914939f" containerName="registry-server" containerID="cri-o://ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3" gracePeriod=2 Nov 25 17:28:48 crc kubenswrapper[4800]: I1125 17:28:48.971223 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.040992 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-utilities\") pod \"a13277de-0659-484a-9129-68147914939f\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.041128 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6q6mm\" (UniqueName: \"kubernetes.io/projected/a13277de-0659-484a-9129-68147914939f-kube-api-access-6q6mm\") pod \"a13277de-0659-484a-9129-68147914939f\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.041194 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-catalog-content\") pod \"a13277de-0659-484a-9129-68147914939f\" (UID: \"a13277de-0659-484a-9129-68147914939f\") " Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.044545 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-utilities" (OuterVolumeSpecName: "utilities") pod "a13277de-0659-484a-9129-68147914939f" (UID: "a13277de-0659-484a-9129-68147914939f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.050469 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a13277de-0659-484a-9129-68147914939f-kube-api-access-6q6mm" (OuterVolumeSpecName: "kube-api-access-6q6mm") pod "a13277de-0659-484a-9129-68147914939f" (UID: "a13277de-0659-484a-9129-68147914939f"). InnerVolumeSpecName "kube-api-access-6q6mm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.142735 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.142776 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6q6mm\" (UniqueName: \"kubernetes.io/projected/a13277de-0659-484a-9129-68147914939f-kube-api-access-6q6mm\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.156091 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a13277de-0659-484a-9129-68147914939f" (UID: "a13277de-0659-484a-9129-68147914939f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.244512 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a13277de-0659-484a-9129-68147914939f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.504710 4800 generic.go:334] "Generic (PLEG): container finished" podID="a13277de-0659-484a-9129-68147914939f" containerID="ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3" exitCode=0 Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.504768 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5nms" event={"ID":"a13277de-0659-484a-9129-68147914939f","Type":"ContainerDied","Data":"ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3"} Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.504809 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5nms" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.504893 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5nms" event={"ID":"a13277de-0659-484a-9129-68147914939f","Type":"ContainerDied","Data":"6062e7d61d91d5a4f6bc11041eb4fc9d47d00f4ad11ee6bdb229f30b1ea7a470"} Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.504944 4800 scope.go:117] "RemoveContainer" containerID="ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.539092 4800 scope.go:117] "RemoveContainer" containerID="47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.558651 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5nms"] Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.569603 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b5nms"] Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.595487 4800 scope.go:117] "RemoveContainer" containerID="b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.638010 4800 scope.go:117] "RemoveContainer" containerID="ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3" Nov 25 17:28:49 crc kubenswrapper[4800]: E1125 17:28:49.638436 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3\": container with ID starting with ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3 not found: ID does not exist" containerID="ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.638477 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3"} err="failed to get container status \"ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3\": rpc error: code = NotFound desc = could not find container \"ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3\": container with ID starting with ca7ecaeac0003bf3b0b39d8e4d1bfc3bd935db839cfe9785978bf1fa2717f1b3 not found: ID does not exist" Nov 25 17:28:49 crc 
kubenswrapper[4800]: I1125 17:28:49.638496 4800 scope.go:117] "RemoveContainer" containerID="47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a" Nov 25 17:28:49 crc kubenswrapper[4800]: E1125 17:28:49.638784 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a\": container with ID starting with 47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a not found: ID does not exist" containerID="47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.638812 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a"} err="failed to get container status \"47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a\": rpc error: code = NotFound desc = could not find container \"47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a\": container with ID starting with 47a71f27c7eedf31ebbc9122209c46851dbe20e89d60de815614626960821a2a not found: ID does not exist" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.638831 4800 scope.go:117] "RemoveContainer" containerID="b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b" Nov 25 17:28:49 crc kubenswrapper[4800]: E1125 17:28:49.639344 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b\": container with ID starting with b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b not found: ID does not exist" containerID="b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.639367 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b"} err="failed to get container status \"b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b\": rpc error: code = NotFound desc = could not find container \"b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b\": container with ID starting with b118c582ca4dbca7e10cc9213fcc6c0a3b59cc56babbe661f0181917af52252b not found: ID does not exist" Nov 25 17:28:49 crc kubenswrapper[4800]: I1125 17:28:49.806630 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a13277de-0659-484a-9129-68147914939f" path="/var/lib/kubelet/pods/a13277de-0659-484a-9129-68147914939f/volumes" Nov 25 17:29:12 crc kubenswrapper[4800]: I1125 17:29:12.640157 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:29:12 crc kubenswrapper[4800]: I1125 17:29:12.640739 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:29:12 crc kubenswrapper[4800]: I1125 17:29:12.640804 4800 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 17:29:12 crc kubenswrapper[4800]: I1125 17:29:12.641916 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8f44cf5110cad2c84d575cff5470f84ba7a97098df2e62b4b9d02fa05bcea5e6"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:29:12 crc kubenswrapper[4800]: I1125 17:29:12.642020 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://8f44cf5110cad2c84d575cff5470f84ba7a97098df2e62b4b9d02fa05bcea5e6" gracePeriod=600 Nov 25 17:29:13 crc kubenswrapper[4800]: I1125 17:29:13.794968 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="8f44cf5110cad2c84d575cff5470f84ba7a97098df2e62b4b9d02fa05bcea5e6" exitCode=0 Nov 25 17:29:13 crc kubenswrapper[4800]: I1125 17:29:13.805952 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"8f44cf5110cad2c84d575cff5470f84ba7a97098df2e62b4b9d02fa05bcea5e6"} Nov 25 17:29:13 crc kubenswrapper[4800]: I1125 17:29:13.806024 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"} Nov 25 17:29:13 crc kubenswrapper[4800]: I1125 17:29:13.806057 4800 scope.go:117] "RemoveContainer" containerID="9bf21f8114acc6955527cc5e81e91fe4ccf29bce27a957659ecc0f6c862be00d" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.189162 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2"] Nov 25 17:30:00 crc kubenswrapper[4800]: E1125 17:30:00.191234 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a13277de-0659-484a-9129-68147914939f" containerName="registry-server" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.191354 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a13277de-0659-484a-9129-68147914939f" containerName="registry-server" Nov 25 17:30:00 crc kubenswrapper[4800]: E1125 17:30:00.191444 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a13277de-0659-484a-9129-68147914939f" containerName="extract-utilities" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.191520 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a13277de-0659-484a-9129-68147914939f" containerName="extract-utilities" Nov 25 17:30:00 crc kubenswrapper[4800]: E1125 17:30:00.191609 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="extract-utilities" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.191698 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="extract-utilities" Nov 25 17:30:00 crc kubenswrapper[4800]: E1125 17:30:00.191785 4800 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="extract-content" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.191970 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="extract-content" Nov 25 17:30:00 crc kubenswrapper[4800]: E1125 17:30:00.192083 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="registry-server" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.192161 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="registry-server" Nov 25 17:30:00 crc kubenswrapper[4800]: E1125 17:30:00.192240 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a13277de-0659-484a-9129-68147914939f" containerName="extract-content" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.192335 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a13277de-0659-484a-9129-68147914939f" containerName="extract-content" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.192663 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a13277de-0659-484a-9129-68147914939f" containerName="registry-server" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.192771 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="92175acb-17bf-4bb8-b4cd-b1097f116691" containerName="registry-server" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.193663 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.196705 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.196903 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.199811 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2"] Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.345784 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzznj\" (UniqueName: \"kubernetes.io/projected/05835060-0875-43ba-ab10-5d437d6d5a40-kube-api-access-fzznj\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.346046 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05835060-0875-43ba-ab10-5d437d6d5a40-secret-volume\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.346247 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05835060-0875-43ba-ab10-5d437d6d5a40-config-volume\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.448038 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05835060-0875-43ba-ab10-5d437d6d5a40-secret-volume\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.448462 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05835060-0875-43ba-ab10-5d437d6d5a40-config-volume\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.448693 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzznj\" (UniqueName: \"kubernetes.io/projected/05835060-0875-43ba-ab10-5d437d6d5a40-kube-api-access-fzznj\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.449276 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05835060-0875-43ba-ab10-5d437d6d5a40-config-volume\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.467061 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05835060-0875-43ba-ab10-5d437d6d5a40-secret-volume\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.476647 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzznj\" (UniqueName: \"kubernetes.io/projected/05835060-0875-43ba-ab10-5d437d6d5a40-kube-api-access-fzznj\") pod \"collect-profiles-29401530-5mjz2\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.531590 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:00 crc kubenswrapper[4800]: I1125 17:30:00.983643 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2"] Nov 25 17:30:00 crc kubenswrapper[4800]: W1125 17:30:00.993423 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05835060_0875_43ba_ab10_5d437d6d5a40.slice/crio-8546577ca67f72fd9797b6e70104bd48cfadc31e9a88496e4fac04e94f852d53 WatchSource:0}: Error finding container 8546577ca67f72fd9797b6e70104bd48cfadc31e9a88496e4fac04e94f852d53: Status 404 returned error can't find the container with id 8546577ca67f72fd9797b6e70104bd48cfadc31e9a88496e4fac04e94f852d53 Nov 25 17:30:01 crc kubenswrapper[4800]: I1125 17:30:01.366084 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" event={"ID":"05835060-0875-43ba-ab10-5d437d6d5a40","Type":"ContainerStarted","Data":"560cc7b9e119ae79e60c282df0b3a95b8a15a38c3400bea39688440efb1ba2de"} Nov 25 17:30:01 crc kubenswrapper[4800]: I1125 17:30:01.366447 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" event={"ID":"05835060-0875-43ba-ab10-5d437d6d5a40","Type":"ContainerStarted","Data":"8546577ca67f72fd9797b6e70104bd48cfadc31e9a88496e4fac04e94f852d53"} Nov 25 17:30:01 crc kubenswrapper[4800]: I1125 17:30:01.400971 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" podStartSLOduration=1.4009457 podStartE2EDuration="1.4009457s" podCreationTimestamp="2025-11-25 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:30:01.380988057 +0000 UTC m=+7962.435396549" watchObservedRunningTime="2025-11-25 17:30:01.4009457 +0000 UTC m=+7962.455354192" Nov 25 17:30:02 crc kubenswrapper[4800]: I1125 17:30:02.379945 4800 generic.go:334] "Generic (PLEG): container finished" podID="05835060-0875-43ba-ab10-5d437d6d5a40" containerID="560cc7b9e119ae79e60c282df0b3a95b8a15a38c3400bea39688440efb1ba2de" exitCode=0 Nov 25 17:30:02 crc kubenswrapper[4800]: I1125 17:30:02.380005 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" event={"ID":"05835060-0875-43ba-ab10-5d437d6d5a40","Type":"ContainerDied","Data":"560cc7b9e119ae79e60c282df0b3a95b8a15a38c3400bea39688440efb1ba2de"} Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.397748 4800 generic.go:334] "Generic (PLEG): container finished" podID="9f498125-ffd2-4526-8234-3e89d84f5753" containerID="b03c0df3eb8612a617a189531059cbc6fb2f99fe2c6b82ed01b7250b31813a1c" exitCode=0 Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.397924 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-full" event={"ID":"9f498125-ffd2-4526-8234-3e89d84f5753","Type":"ContainerDied","Data":"b03c0df3eb8612a617a189531059cbc6fb2f99fe2c6b82ed01b7250b31813a1c"} Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.850505 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.947723 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05835060-0875-43ba-ab10-5d437d6d5a40-secret-volume\") pod \"05835060-0875-43ba-ab10-5d437d6d5a40\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.948392 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzznj\" (UniqueName: \"kubernetes.io/projected/05835060-0875-43ba-ab10-5d437d6d5a40-kube-api-access-fzznj\") pod \"05835060-0875-43ba-ab10-5d437d6d5a40\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.949316 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05835060-0875-43ba-ab10-5d437d6d5a40-config-volume\") pod \"05835060-0875-43ba-ab10-5d437d6d5a40\" (UID: \"05835060-0875-43ba-ab10-5d437d6d5a40\") " Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.949999 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05835060-0875-43ba-ab10-5d437d6d5a40-config-volume" (OuterVolumeSpecName: "config-volume") pod "05835060-0875-43ba-ab10-5d437d6d5a40" (UID: "05835060-0875-43ba-ab10-5d437d6d5a40"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.950611 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05835060-0875-43ba-ab10-5d437d6d5a40-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.958643 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05835060-0875-43ba-ab10-5d437d6d5a40-kube-api-access-fzznj" (OuterVolumeSpecName: "kube-api-access-fzznj") pod "05835060-0875-43ba-ab10-5d437d6d5a40" (UID: "05835060-0875-43ba-ab10-5d437d6d5a40"). InnerVolumeSpecName "kube-api-access-fzznj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:30:03 crc kubenswrapper[4800]: I1125 17:30:03.958722 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05835060-0875-43ba-ab10-5d437d6d5a40-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "05835060-0875-43ba-ab10-5d437d6d5a40" (UID: "05835060-0875-43ba-ab10-5d437d6d5a40"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:30:04 crc kubenswrapper[4800]: I1125 17:30:04.053172 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05835060-0875-43ba-ab10-5d437d6d5a40-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:04 crc kubenswrapper[4800]: I1125 17:30:04.053243 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzznj\" (UniqueName: \"kubernetes.io/projected/05835060-0875-43ba-ab10-5d437d6d5a40-kube-api-access-fzznj\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:04 crc kubenswrapper[4800]: I1125 17:30:04.408284 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" Nov 25 17:30:04 crc kubenswrapper[4800]: I1125 17:30:04.413967 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2" event={"ID":"05835060-0875-43ba-ab10-5d437d6d5a40","Type":"ContainerDied","Data":"8546577ca67f72fd9797b6e70104bd48cfadc31e9a88496e4fac04e94f852d53"} Nov 25 17:30:04 crc kubenswrapper[4800]: I1125 17:30:04.414009 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8546577ca67f72fd9797b6e70104bd48cfadc31e9a88496e4fac04e94f852d53" Nov 25 17:30:04 crc kubenswrapper[4800]: I1125 17:30:04.478916 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"] Nov 25 17:30:04 crc kubenswrapper[4800]: I1125 17:30:04.485793 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-mf2zs"] Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.277809 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-full" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.380945 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-config-data\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381006 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381122 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-temporary\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381168 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config-secret\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381206 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ca-certs\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381252 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ssh-key\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381288 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-workdir\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381322 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381365 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ceph\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.381401 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg77c\" (UniqueName: \"kubernetes.io/projected/9f498125-ffd2-4526-8234-3e89d84f5753-kube-api-access-mg77c\") pod \"9f498125-ffd2-4526-8234-3e89d84f5753\" (UID: \"9f498125-ffd2-4526-8234-3e89d84f5753\") " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.382294 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.382603 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-config-data" (OuterVolumeSpecName: "config-data") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.395056 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.405499 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ceph" (OuterVolumeSpecName: "ceph") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.405866 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f498125-ffd2-4526-8234-3e89d84f5753-kube-api-access-mg77c" (OuterVolumeSpecName: "kube-api-access-mg77c") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "kube-api-access-mg77c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.410201 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "test-operator-logs") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.415384 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.418748 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.419491 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-full" event={"ID":"9f498125-ffd2-4526-8234-3e89d84f5753","Type":"ContainerDied","Data":"3b0f62b8a4db084b9c05fe2f620aca0b36a87ee9ee1038f7e1a7d362da71b046"} Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.419536 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b0f62b8a4db084b9c05fe2f620aca0b36a87ee9ee1038f7e1a7d362da71b046" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.419711 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-full" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.428802 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.440973 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "9f498125-ffd2-4526-8234-3e89d84f5753" (UID: "9f498125-ffd2-4526-8234-3e89d84f5753"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.484995 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s01-single-test"] Nov 25 17:30:05 crc kubenswrapper[4800]: E1125 17:30:05.485391 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05835060-0875-43ba-ab10-5d437d6d5a40" containerName="collect-profiles" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.485408 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="05835060-0875-43ba-ab10-5d437d6d5a40" containerName="collect-profiles" Nov 25 17:30:05 crc kubenswrapper[4800]: E1125 17:30:05.485422 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f498125-ffd2-4526-8234-3e89d84f5753" containerName="tempest-tests-tempest-tests-runner" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.485430 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f498125-ffd2-4526-8234-3e89d84f5753" containerName="tempest-tests-tempest-tests-runner" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.485681 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="05835060-0875-43ba-ab10-5d437d6d5a40" containerName="collect-profiles" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.485700 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f498125-ffd2-4526-8234-3e89d84f5753" containerName="tempest-tests-tempest-tests-runner" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486330 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486369 4800 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486383 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486417 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486462 4800 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486478 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f498125-ffd2-4526-8234-3e89d84f5753-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486490 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg77c\" (UniqueName: \"kubernetes.io/projected/9f498125-ffd2-4526-8234-3e89d84f5753-kube-api-access-mg77c\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486504 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486515 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9f498125-ffd2-4526-8234-3e89d84f5753-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.486527 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9f498125-ffd2-4526-8234-3e89d84f5753-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.495598 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.504791 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s1" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.521005 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s1" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.526675 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-test"] Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.553105 4800 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.588819 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.588930 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.588982 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-config-data\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.589049 4800 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.690289 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ceph\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " 
pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.690728 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krmm5\" (UniqueName: \"kubernetes.io/projected/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-kube-api-access-krmm5\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.690810 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ssh-key\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.690859 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.690890 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.690911 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ca-certs\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.690963 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.691016 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.691040 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.691063 4800 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-config-data\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.692375 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-config-data\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.693703 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.696605 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793191 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793257 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ca-certs\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793424 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793462 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793556 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ceph\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " 
pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793609 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krmm5\" (UniqueName: \"kubernetes.io/projected/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-kube-api-access-krmm5\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793727 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ssh-key\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.793828 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.794182 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.794269 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.798998 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ssh-key\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.801103 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ceph\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.802995 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ca-certs\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.808480 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a33414b-786b-4b41-a5f2-3ec4fa3df4cb" path="/var/lib/kubelet/pods/5a33414b-786b-4b41-a5f2-3ec4fa3df4cb/volumes" Nov 
25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.817627 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krmm5\" (UniqueName: \"kubernetes.io/projected/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-kube-api-access-krmm5\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:05 crc kubenswrapper[4800]: I1125 17:30:05.847169 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest-s01-single-test\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") " pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:06 crc kubenswrapper[4800]: I1125 17:30:06.137309 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-test" Nov 25 17:30:06 crc kubenswrapper[4800]: I1125 17:30:06.759557 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-test"] Nov 25 17:30:07 crc kubenswrapper[4800]: I1125 17:30:07.445466 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-test" event={"ID":"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f","Type":"ContainerStarted","Data":"41c75593056051bf4bc695a1cd511fa08ba3b3559144aeb0b8bc3a367724afbc"} Nov 25 17:30:08 crc kubenswrapper[4800]: I1125 17:30:08.459467 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-test" event={"ID":"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f","Type":"ContainerStarted","Data":"1ebf395b86045770c15794bafcca4446b61704cace1630221d6d3ad0d1698632"} Nov 25 17:30:08 crc kubenswrapper[4800]: I1125 17:30:08.508200 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s01-single-test" podStartSLOduration=3.508167521 podStartE2EDuration="3.508167521s" podCreationTimestamp="2025-11-25 17:30:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:30:08.48645814 +0000 UTC m=+7969.540866622" watchObservedRunningTime="2025-11-25 17:30:08.508167521 +0000 UTC m=+7969.562576023" Nov 25 17:30:33 crc kubenswrapper[4800]: I1125 17:30:33.643911 4800 scope.go:117] "RemoveContainer" containerID="0728ddfc2512de118284fd4476c1e4580352c0843d7da90b1dd04cd156625594" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.177653 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pdzh2"] Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.183599 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.231454 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdzh2"] Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.249658 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-catalog-content\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.249858 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-utilities\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.249903 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crc77\" (UniqueName: \"kubernetes.io/projected/975bb927-fc71-49d5-8e02-6576974b6be6-kube-api-access-crc77\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.351646 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-catalog-content\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.351884 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-utilities\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.351931 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crc77\" (UniqueName: \"kubernetes.io/projected/975bb927-fc71-49d5-8e02-6576974b6be6-kube-api-access-crc77\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.352785 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-catalog-content\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.352857 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-utilities\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.378777 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-crc77\" (UniqueName: \"kubernetes.io/projected/975bb927-fc71-49d5-8e02-6576974b6be6-kube-api-access-crc77\") pod \"certified-operators-pdzh2\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:06 crc kubenswrapper[4800]: I1125 17:31:06.513042 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:07 crc kubenswrapper[4800]: I1125 17:31:07.046691 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdzh2"] Nov 25 17:31:07 crc kubenswrapper[4800]: I1125 17:31:07.175486 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdzh2" event={"ID":"975bb927-fc71-49d5-8e02-6576974b6be6","Type":"ContainerStarted","Data":"76ae5f5975adca0ea743059ae5d238d55ec4e1f13849f606b219c85075d24f46"} Nov 25 17:31:08 crc kubenswrapper[4800]: I1125 17:31:08.188863 4800 generic.go:334] "Generic (PLEG): container finished" podID="975bb927-fc71-49d5-8e02-6576974b6be6" containerID="6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad" exitCode=0 Nov 25 17:31:08 crc kubenswrapper[4800]: I1125 17:31:08.188961 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdzh2" event={"ID":"975bb927-fc71-49d5-8e02-6576974b6be6","Type":"ContainerDied","Data":"6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad"} Nov 25 17:31:10 crc kubenswrapper[4800]: I1125 17:31:10.210778 4800 generic.go:334] "Generic (PLEG): container finished" podID="975bb927-fc71-49d5-8e02-6576974b6be6" containerID="cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e" exitCode=0 Nov 25 17:31:10 crc kubenswrapper[4800]: I1125 17:31:10.210894 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdzh2" event={"ID":"975bb927-fc71-49d5-8e02-6576974b6be6","Type":"ContainerDied","Data":"cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e"} Nov 25 17:31:11 crc kubenswrapper[4800]: I1125 17:31:11.225566 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdzh2" event={"ID":"975bb927-fc71-49d5-8e02-6576974b6be6","Type":"ContainerStarted","Data":"cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47"} Nov 25 17:31:11 crc kubenswrapper[4800]: I1125 17:31:11.248870 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pdzh2" podStartSLOduration=2.808993719 podStartE2EDuration="5.248802378s" podCreationTimestamp="2025-11-25 17:31:06 +0000 UTC" firstStartedPulling="2025-11-25 17:31:08.192134284 +0000 UTC m=+8029.246542776" lastFinishedPulling="2025-11-25 17:31:10.631942943 +0000 UTC m=+8031.686351435" observedRunningTime="2025-11-25 17:31:11.248284264 +0000 UTC m=+8032.302692806" watchObservedRunningTime="2025-11-25 17:31:11.248802378 +0000 UTC m=+8032.303210890" Nov 25 17:31:16 crc kubenswrapper[4800]: I1125 17:31:16.513506 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:16 crc kubenswrapper[4800]: I1125 17:31:16.516019 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:16 crc kubenswrapper[4800]: I1125 17:31:16.601340 4800 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:17 crc kubenswrapper[4800]: I1125 17:31:17.374213 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:17 crc kubenswrapper[4800]: I1125 17:31:17.453110 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdzh2"] Nov 25 17:31:19 crc kubenswrapper[4800]: I1125 17:31:19.319143 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pdzh2" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="registry-server" containerID="cri-o://cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47" gracePeriod=2 Nov 25 17:31:19 crc kubenswrapper[4800]: I1125 17:31:19.931255 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.001718 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-catalog-content\") pod \"975bb927-fc71-49d5-8e02-6576974b6be6\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.001906 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-utilities\") pod \"975bb927-fc71-49d5-8e02-6576974b6be6\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.002011 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crc77\" (UniqueName: \"kubernetes.io/projected/975bb927-fc71-49d5-8e02-6576974b6be6-kube-api-access-crc77\") pod \"975bb927-fc71-49d5-8e02-6576974b6be6\" (UID: \"975bb927-fc71-49d5-8e02-6576974b6be6\") " Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.002685 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-utilities" (OuterVolumeSpecName: "utilities") pod "975bb927-fc71-49d5-8e02-6576974b6be6" (UID: "975bb927-fc71-49d5-8e02-6576974b6be6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.007456 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/975bb927-fc71-49d5-8e02-6576974b6be6-kube-api-access-crc77" (OuterVolumeSpecName: "kube-api-access-crc77") pod "975bb927-fc71-49d5-8e02-6576974b6be6" (UID: "975bb927-fc71-49d5-8e02-6576974b6be6"). InnerVolumeSpecName "kube-api-access-crc77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.052410 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "975bb927-fc71-49d5-8e02-6576974b6be6" (UID: "975bb927-fc71-49d5-8e02-6576974b6be6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.104580 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.104951 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crc77\" (UniqueName: \"kubernetes.io/projected/975bb927-fc71-49d5-8e02-6576974b6be6-kube-api-access-crc77\") on node \"crc\" DevicePath \"\"" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.104973 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/975bb927-fc71-49d5-8e02-6576974b6be6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.331037 4800 generic.go:334] "Generic (PLEG): container finished" podID="975bb927-fc71-49d5-8e02-6576974b6be6" containerID="cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47" exitCode=0 Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.331102 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdzh2" event={"ID":"975bb927-fc71-49d5-8e02-6576974b6be6","Type":"ContainerDied","Data":"cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47"} Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.331168 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdzh2" event={"ID":"975bb927-fc71-49d5-8e02-6576974b6be6","Type":"ContainerDied","Data":"76ae5f5975adca0ea743059ae5d238d55ec4e1f13849f606b219c85075d24f46"} Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.331198 4800 scope.go:117] "RemoveContainer" containerID="cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.331123 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pdzh2" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.365035 4800 scope.go:117] "RemoveContainer" containerID="cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.382990 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdzh2"] Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.396731 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pdzh2"] Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.424120 4800 scope.go:117] "RemoveContainer" containerID="6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.443585 4800 scope.go:117] "RemoveContainer" containerID="cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47" Nov 25 17:31:20 crc kubenswrapper[4800]: E1125 17:31:20.444037 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47\": container with ID starting with cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47 not found: ID does not exist" containerID="cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.444071 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47"} err="failed to get container status \"cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47\": rpc error: code = NotFound desc = could not find container \"cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47\": container with ID starting with cb4ac2a2f79c98ea8b03da0af1f95937f6a208973afec19cd9e1e656b9cbaf47 not found: ID does not exist" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.444091 4800 scope.go:117] "RemoveContainer" containerID="cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e" Nov 25 17:31:20 crc kubenswrapper[4800]: E1125 17:31:20.444383 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e\": container with ID starting with cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e not found: ID does not exist" containerID="cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.444411 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e"} err="failed to get container status \"cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e\": rpc error: code = NotFound desc = could not find container \"cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e\": container with ID starting with cc35505342faf594a0186c0b9854bd30c697a03e3aa524751f3a5ad1df5f3e8e not found: ID does not exist" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.444424 4800 scope.go:117] "RemoveContainer" containerID="6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad" Nov 25 17:31:20 crc kubenswrapper[4800]: E1125 17:31:20.444978 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad\": container with ID starting with 6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad not found: ID does not exist" containerID="6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad" Nov 25 17:31:20 crc kubenswrapper[4800]: I1125 17:31:20.445039 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad"} err="failed to get container status \"6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad\": rpc error: code = NotFound desc = could not find container \"6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad\": container with ID starting with 6f53a2291951ffa1f5e168aec9c29c29fb7844fb402e3d7898fec5abb52940ad not found: ID does not exist" Nov 25 17:31:21 crc kubenswrapper[4800]: I1125 17:31:21.811204 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" path="/var/lib/kubelet/pods/975bb927-fc71-49d5-8e02-6576974b6be6/volumes" Nov 25 17:31:42 crc kubenswrapper[4800]: I1125 17:31:42.640633 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:31:42 crc kubenswrapper[4800]: I1125 17:31:42.641436 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.147071 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sjsjq"] Nov 25 17:31:47 crc kubenswrapper[4800]: E1125 17:31:47.148225 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="registry-server" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.148246 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="registry-server" Nov 25 17:31:47 crc kubenswrapper[4800]: E1125 17:31:47.148303 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="extract-content" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.148315 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="extract-content" Nov 25 17:31:47 crc kubenswrapper[4800]: E1125 17:31:47.148344 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="extract-utilities" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.148357 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="extract-utilities" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.148692 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="975bb927-fc71-49d5-8e02-6576974b6be6" containerName="registry-server" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 
17:31:47.151327 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.159007 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-catalog-content\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.159177 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcbh8\" (UniqueName: \"kubernetes.io/projected/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-kube-api-access-mcbh8\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.159554 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-utilities\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.165975 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjsjq"] Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.262712 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-catalog-content\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.262776 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcbh8\" (UniqueName: \"kubernetes.io/projected/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-kube-api-access-mcbh8\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.262878 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-utilities\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.263176 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-catalog-content\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.263386 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-utilities\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq" Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 
17:31:47.294275 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcbh8\" (UniqueName: \"kubernetes.io/projected/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-kube-api-access-mcbh8\") pod \"redhat-marketplace-sjsjq\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") " pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:31:47 crc kubenswrapper[4800]: I1125 17:31:47.522422 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:31:48 crc kubenswrapper[4800]: I1125 17:31:48.004202 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjsjq"]
Nov 25 17:31:48 crc kubenswrapper[4800]: I1125 17:31:48.651049 4800 generic.go:334] "Generic (PLEG): container finished" podID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerID="d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770" exitCode=0
Nov 25 17:31:48 crc kubenswrapper[4800]: I1125 17:31:48.651138 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjsjq" event={"ID":"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9","Type":"ContainerDied","Data":"d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770"}
Nov 25 17:31:48 crc kubenswrapper[4800]: I1125 17:31:48.651378 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjsjq" event={"ID":"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9","Type":"ContainerStarted","Data":"dee71813771b4cc27081d3fbb84e2e1b1e4852f937254bc46995901ddcc99f97"}
Nov 25 17:31:49 crc kubenswrapper[4800]: I1125 17:31:49.670542 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjsjq" event={"ID":"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9","Type":"ContainerStarted","Data":"f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab"}
Nov 25 17:31:50 crc kubenswrapper[4800]: I1125 17:31:50.689582 4800 generic.go:334] "Generic (PLEG): container finished" podID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerID="f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab" exitCode=0
Nov 25 17:31:50 crc kubenswrapper[4800]: I1125 17:31:50.689649 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjsjq" event={"ID":"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9","Type":"ContainerDied","Data":"f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab"}
Nov 25 17:31:51 crc kubenswrapper[4800]: I1125 17:31:51.724823 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjsjq" event={"ID":"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9","Type":"ContainerStarted","Data":"a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a"}
Nov 25 17:31:51 crc kubenswrapper[4800]: I1125 17:31:51.760124 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sjsjq" podStartSLOduration=2.339883435 podStartE2EDuration="4.76010598s" podCreationTimestamp="2025-11-25 17:31:47 +0000 UTC" firstStartedPulling="2025-11-25 17:31:48.652410719 +0000 UTC m=+8069.706819201" lastFinishedPulling="2025-11-25 17:31:51.072633224 +0000 UTC m=+8072.127041746" observedRunningTime="2025-11-25 17:31:51.752669279 +0000 UTC m=+8072.807077781" watchObservedRunningTime="2025-11-25 17:31:51.76010598 +0000 UTC m=+8072.814514472"
Nov 25 17:31:57 crc kubenswrapper[4800]: I1125 17:31:57.523410 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:31:57 crc kubenswrapper[4800]: I1125 17:31:57.524059 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:31:57 crc kubenswrapper[4800]: I1125 17:31:57.592287 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:31:57 crc kubenswrapper[4800]: I1125 17:31:57.867346 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:31:57 crc kubenswrapper[4800]: I1125 17:31:57.932361 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjsjq"]
Nov 25 17:31:59 crc kubenswrapper[4800]: I1125 17:31:59.802560 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sjsjq" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="registry-server" containerID="cri-o://a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a" gracePeriod=2
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.317666 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.390514 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-catalog-content\") pod \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") "
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.390606 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-utilities\") pod \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") "
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.390670 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcbh8\" (UniqueName: \"kubernetes.io/projected/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-kube-api-access-mcbh8\") pod \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\" (UID: \"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9\") "
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.391666 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-utilities" (OuterVolumeSpecName: "utilities") pod "ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" (UID: "ee8ec8e3-dd60-4967-9ff5-84c0c865acc9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.397553 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-kube-api-access-mcbh8" (OuterVolumeSpecName: "kube-api-access-mcbh8") pod "ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" (UID: "ee8ec8e3-dd60-4967-9ff5-84c0c865acc9"). InnerVolumeSpecName "kube-api-access-mcbh8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.417822 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" (UID: "ee8ec8e3-dd60-4967-9ff5-84c0c865acc9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.492657 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.492882 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcbh8\" (UniqueName: \"kubernetes.io/projected/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-kube-api-access-mcbh8\") on node \"crc\" DevicePath \"\""
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.492968 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.824255 4800 generic.go:334] "Generic (PLEG): container finished" podID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerID="a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a" exitCode=0
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.824346 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjsjq"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.824340 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjsjq" event={"ID":"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9","Type":"ContainerDied","Data":"a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a"}
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.824779 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjsjq" event={"ID":"ee8ec8e3-dd60-4967-9ff5-84c0c865acc9","Type":"ContainerDied","Data":"dee71813771b4cc27081d3fbb84e2e1b1e4852f937254bc46995901ddcc99f97"}
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.824818 4800 scope.go:117] "RemoveContainer" containerID="a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.852381 4800 scope.go:117] "RemoveContainer" containerID="f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.886546 4800 scope.go:117] "RemoveContainer" containerID="d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.896280 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjsjq"]
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.923168 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjsjq"]
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.933254 4800 scope.go:117] "RemoveContainer" containerID="a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a"
Nov 25 17:32:00 crc kubenswrapper[4800]: E1125 17:32:00.933900 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a\": container with ID starting with a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a not found: ID does not exist" containerID="a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.933957 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a"} err="failed to get container status \"a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a\": rpc error: code = NotFound desc = could not find container \"a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a\": container with ID starting with a92b3847e62acde111d3f8f5457fc81d2c066ba9115884e9d02b1ae23149a68a not found: ID does not exist"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.933992 4800 scope.go:117] "RemoveContainer" containerID="f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab"
Nov 25 17:32:00 crc kubenswrapper[4800]: E1125 17:32:00.935113 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab\": container with ID starting with f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab not found: ID does not exist" containerID="f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.935186 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab"} err="failed to get container status \"f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab\": rpc error: code = NotFound desc = could not find container \"f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab\": container with ID starting with f9dd979a332f0547b46a0ec62d5ea4d0af95b78b70706aa12888c7363d7badab not found: ID does not exist"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.935235 4800 scope.go:117] "RemoveContainer" containerID="d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770"
Nov 25 17:32:00 crc kubenswrapper[4800]: E1125 17:32:00.936026 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770\": container with ID starting with d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770 not found: ID does not exist" containerID="d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770"
Nov 25 17:32:00 crc kubenswrapper[4800]: I1125 17:32:00.936075 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770"} err="failed to get container status \"d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770\": rpc error: code = NotFound desc = could not find container \"d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770\": container with ID starting with d2f9836d0bc1c09f6d3f4a0ca86acf697052d26b0402ac94aa4c172df172a770 not found: ID does not exist"
Nov 25 17:32:01 crc kubenswrapper[4800]: I1125 17:32:01.804457 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" path="/var/lib/kubelet/pods/ee8ec8e3-dd60-4967-9ff5-84c0c865acc9/volumes"
Nov 25 17:32:12 crc kubenswrapper[4800]: I1125 17:32:12.640711 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:32:12 crc kubenswrapper[4800]: I1125 17:32:12.641474 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:32:42 crc kubenswrapper[4800]: I1125 17:32:42.639917 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:32:42 crc kubenswrapper[4800]: I1125 17:32:42.640585 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:32:42 crc kubenswrapper[4800]: I1125 17:32:42.640651 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 17:32:42 crc kubenswrapper[4800]: I1125 17:32:42.642023 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 17:32:42 crc kubenswrapper[4800]: I1125 17:32:42.642165 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" gracePeriod=600
Nov 25 17:32:42 crc kubenswrapper[4800]: E1125 17:32:42.804237 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:32:43 crc kubenswrapper[4800]: I1125 17:32:43.310987 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" exitCode=0
Nov 25 17:32:43 crc kubenswrapper[4800]: I1125 17:32:43.311068 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"}
Nov 25 17:32:43 crc kubenswrapper[4800]: I1125 17:32:43.311146 4800 scope.go:117] "RemoveContainer" containerID="8f44cf5110cad2c84d575cff5470f84ba7a97098df2e62b4b9d02fa05bcea5e6"
Nov 25 17:32:43 crc kubenswrapper[4800]: I1125 17:32:43.312184 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:32:43 crc kubenswrapper[4800]: E1125 17:32:43.312658 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:32:54 crc kubenswrapper[4800]: I1125 17:32:54.785412 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:32:54 crc kubenswrapper[4800]: E1125 17:32:54.786461 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:33:07 crc kubenswrapper[4800]: I1125 17:33:07.787491 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:33:07 crc kubenswrapper[4800]: E1125 17:33:07.789136 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:33:22 crc kubenswrapper[4800]: I1125 17:33:22.786598 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:33:22 crc kubenswrapper[4800]: E1125 17:33:22.787976 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:33:37 crc kubenswrapper[4800]: I1125 17:33:37.786381 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:33:37 crc kubenswrapper[4800]: E1125 17:33:37.788308 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:33:50 crc kubenswrapper[4800]: I1125 17:33:50.786816 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:33:50 crc kubenswrapper[4800]: E1125 17:33:50.788299 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:34:01 crc kubenswrapper[4800]: I1125 17:34:01.791148 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:34:01 crc kubenswrapper[4800]: E1125 17:34:01.792620 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:34:16 crc kubenswrapper[4800]: I1125 17:34:16.786297 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:34:16 crc kubenswrapper[4800]: E1125 17:34:16.787556 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:34:30 crc kubenswrapper[4800]: I1125 17:34:30.785451 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:34:30 crc kubenswrapper[4800]: E1125 17:34:30.786465 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:34:40 crc kubenswrapper[4800]: I1125 17:34:40.646283 4800 generic.go:334] "Generic (PLEG): container finished" podID="2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" containerID="1ebf395b86045770c15794bafcca4446b61704cace1630221d6d3ad0d1698632" exitCode=0
Nov 25 17:34:40 crc kubenswrapper[4800]: I1125 17:34:40.646381 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-test" event={"ID":"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f","Type":"ContainerDied","Data":"1ebf395b86045770c15794bafcca4446b61704cace1630221d6d3ad0d1698632"}
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.201611 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-test"
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.208181 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-config-data\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.208256 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.208305 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krmm5\" (UniqueName: \"kubernetes.io/projected/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-kube-api-access-krmm5\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.209130 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-config-data" (OuterVolumeSpecName: "config-data") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.213808 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-kube-api-access-krmm5" (OuterVolumeSpecName: "kube-api-access-krmm5") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "kube-api-access-krmm5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.216021 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "test-operator-logs") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.311223 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-workdir\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.311342 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-temporary\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.311485 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config-secret\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.311605 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ceph\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.311824 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ssh-key\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.312033 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.312262 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ca-certs\") pod \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\" (UID: \"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f\") "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.313375 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.313456 4800 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.313489 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krmm5\" (UniqueName: \"kubernetes.io/projected/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-kube-api-access-krmm5\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.316211 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.317411 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.320038 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ceph" (OuterVolumeSpecName: "ceph") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.334159 4800 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.345618 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.351167 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.360219 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.381917 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" (UID: "2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416029 4800 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416075 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416094 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416110 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416125 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ceph\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416136 4800 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416147 4800 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.416159 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.676230 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-test" event={"ID":"2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f","Type":"ContainerDied","Data":"41c75593056051bf4bc695a1cd511fa08ba3b3559144aeb0b8bc3a367724afbc"}
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.676306 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41c75593056051bf4bc695a1cd511fa08ba3b3559144aeb0b8bc3a367724afbc"
Nov 25 17:34:42 crc kubenswrapper[4800]: I1125 17:34:42.676309 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-test"
Nov 25 17:34:44 crc kubenswrapper[4800]: I1125 17:34:44.786238 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:34:44 crc kubenswrapper[4800]: E1125 17:34:44.786766 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.909045 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 25 17:34:51 crc kubenswrapper[4800]: E1125 17:34:51.910223 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" containerName="tempest-tests-tempest-tests-runner"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.910241 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" containerName="tempest-tests-tempest-tests-runner"
Nov 25 17:34:51 crc kubenswrapper[4800]: E1125 17:34:51.910266 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="registry-server"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.910275 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="registry-server"
Nov 25 17:34:51 crc kubenswrapper[4800]: E1125 17:34:51.910309 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="extract-content"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.910317 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="extract-content"
Nov 25 17:34:51 crc kubenswrapper[4800]: E1125 17:34:51.910340 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="extract-utilities"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.910349 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="extract-utilities"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.910582 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f" containerName="tempest-tests-tempest-tests-runner"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.910605 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8ec8e3-dd60-4967-9ff5-84c0c865acc9" containerName="registry-server"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.911905 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.913748 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-wsw92"
Nov 25 17:34:51 crc kubenswrapper[4800]: I1125 17:34:51.924265 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.048192 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e5062a21-fde6-4339-87c9-268d93f7b2a1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.048330 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnlpv\" (UniqueName: \"kubernetes.io/projected/e5062a21-fde6-4339-87c9-268d93f7b2a1-kube-api-access-pnlpv\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e5062a21-fde6-4339-87c9-268d93f7b2a1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.150780 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnlpv\" (UniqueName: \"kubernetes.io/projected/e5062a21-fde6-4339-87c9-268d93f7b2a1-kube-api-access-pnlpv\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e5062a21-fde6-4339-87c9-268d93f7b2a1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.151155 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e5062a21-fde6-4339-87c9-268d93f7b2a1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.151994 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e5062a21-fde6-4339-87c9-268d93f7b2a1\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.179964 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnlpv\" (UniqueName: \"kubernetes.io/projected/e5062a21-fde6-4339-87c9-268d93f7b2a1-kube-api-access-pnlpv\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e5062a21-fde6-4339-87c9-268d93f7b2a1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.218682 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e5062a21-fde6-4339-87c9-268d93f7b2a1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.252071 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.733248 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.741406 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 17:34:52 crc kubenswrapper[4800]: I1125 17:34:52.793941 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"e5062a21-fde6-4339-87c9-268d93f7b2a1","Type":"ContainerStarted","Data":"76842f3a16f6c7f5fc381cecce58edff05d1eab783453dfdba8c2696d830e945"}
Nov 25 17:34:54 crc kubenswrapper[4800]: I1125 17:34:54.823687 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"e5062a21-fde6-4339-87c9-268d93f7b2a1","Type":"ContainerStarted","Data":"19b796a722bbfed3daf5295da425d153c64b6b4ae61465e458f874eca6742b86"}
Nov 25 17:34:54 crc kubenswrapper[4800]: I1125 17:34:54.843491 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.901820234 podStartE2EDuration="3.843465459s" podCreationTimestamp="2025-11-25 17:34:51 +0000 UTC" firstStartedPulling="2025-11-25 17:34:52.741176572 +0000 UTC m=+8253.795585064" lastFinishedPulling="2025-11-25 17:34:53.682821767 +0000 UTC m=+8254.737230289" observedRunningTime="2025-11-25 17:34:54.841111505 +0000 UTC m=+8255.895520027" watchObservedRunningTime="2025-11-25 17:34:54.843465459 +0000 UTC m=+8255.897873971"
Nov 25 17:34:59 crc kubenswrapper[4800]: I1125 17:34:59.794282 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:34:59 crc kubenswrapper[4800]: E1125 17:34:59.794982 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.801919 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tobiko-tests-tobiko-s00-podified-functional"]
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.804670 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.808701 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"tobiko-secret"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.809082 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"test-operator-clouds-config"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.809284 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tobiko-tests-tobikotobiko-private-key"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.810058 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tobiko-tests-tobikotobiko-config"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.810272 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tobiko-tests-tobikotobiko-public-key"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.810442 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tobiko-tests-tobiko-s00-podified-functional"]
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.938475 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-public-key\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.938561 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-openstack-config-secret\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.938646 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzq6n\" (UniqueName: \"kubernetes.io/projected/1b0af985-22c6-472c-99fd-aa42bee61e14-kube-api-access-qzq6n\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.938917 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-config\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939023 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ceph\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939107 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-workdir\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939193 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-clouds-config\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939271 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-kubeconfig\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939304 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ca-certs\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939392 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-private-key\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939454 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-temporary\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:10 crc kubenswrapper[4800]: I1125 17:35:10.939588 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041341 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-public-key\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041423 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-openstack-config-secret\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041467 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzq6n\" (UniqueName: \"kubernetes.io/projected/1b0af985-22c6-472c-99fd-aa42bee61e14-kube-api-access-qzq6n\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041545 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-config\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041596 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ceph\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041652 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-workdir\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041693 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-clouds-config\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041756 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-kubeconfig\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041787 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ca-certs\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.041943 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-private-key\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.042014 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-temporary\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.042060 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.042438 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-config\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.042497 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.042903 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-temporary\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.043051 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-workdir\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.043130 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-clouds-config\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.043593 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-private-key\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.044070 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-public-key\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.049433 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ceph\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.050265 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-kubeconfig\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.050826 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ca-certs\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.064039 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-openstack-config-secret\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.077153 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzq6n\" (UniqueName: \"kubernetes.io/projected/1b0af985-22c6-472c-99fd-aa42bee61e14-kube-api-access-qzq6n\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.097072 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tobiko-tests-tobiko-s00-podified-functional\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.145731 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:35:11 crc kubenswrapper[4800]: I1125 17:35:11.771237 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tobiko-tests-tobiko-s00-podified-functional"]
Nov 25 17:35:11 crc kubenswrapper[4800]: W1125 17:35:11.775975 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b0af985_22c6_472c_99fd_aa42bee61e14.slice/crio-b45c725526d34ab2002737d52c787b076e99b7195ca236e034f5367e0e2031f1 WatchSource:0}: Error finding container b45c725526d34ab2002737d52c787b076e99b7195ca236e034f5367e0e2031f1: Status 404 returned error can't find the container with id b45c725526d34ab2002737d52c787b076e99b7195ca236e034f5367e0e2031f1
Nov 25 17:35:12 crc kubenswrapper[4800]: I1125 17:35:12.039681 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s00-podified-functional" event={"ID":"1b0af985-22c6-472c-99fd-aa42bee61e14","Type":"ContainerStarted","Data":"b45c725526d34ab2002737d52c787b076e99b7195ca236e034f5367e0e2031f1"}
Nov 25 17:35:13 crc kubenswrapper[4800]: I1125 17:35:13.785586 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:35:13 crc kubenswrapper[4800]: E1125 17:35:13.786323 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:35:27 crc kubenswrapper[4800]: I1125 17:35:27.203911 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s00-podified-functional" event={"ID":"1b0af985-22c6-472c-99fd-aa42bee61e14","Type":"ContainerStarted","Data":"dcbb1dd23c6378dce6aa3c13bc4e0d330430728ebfd870bbbddb000af4542af3"}
Nov 25 17:35:27 crc kubenswrapper[4800]: I1125 17:35:27.232131 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tobiko-tests-tobiko-s00-podified-functional" podStartSLOduration=3.448028612 podStartE2EDuration="18.2321081s" podCreationTimestamp="2025-11-25 17:35:09 +0000 UTC" firstStartedPulling="2025-11-25 17:35:11.779926937 +0000 UTC m=+8272.834335409" lastFinishedPulling="2025-11-25 17:35:26.564006415 +0000 UTC m=+8287.618414897" observedRunningTime="2025-11-25 17:35:27.231829322 +0000 UTC m=+8288.286237844" watchObservedRunningTime="2025-11-25 17:35:27.2321081 +0000 UTC m=+8288.286516612"
Nov 25 17:35:28 crc kubenswrapper[4800]: I1125 17:35:28.786810 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:35:28 crc kubenswrapper[4800]: E1125 17:35:28.787297 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:35:39 crc kubenswrapper[4800]: I1125 17:35:39.795485 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:35:39 crc kubenswrapper[4800]: E1125 17:35:39.796328 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:35:50 crc kubenswrapper[4800]: I1125 17:35:50.785908 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:35:50 crc kubenswrapper[4800]: E1125 17:35:50.787035 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:36:03 crc kubenswrapper[4800]: I1125 17:36:03.786336 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:36:03 crc kubenswrapper[4800]: E1125 17:36:03.787724 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:36:17 crc kubenswrapper[4800]: I1125 17:36:17.788064 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:36:17 crc kubenswrapper[4800]: E1125 17:36:17.789117 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:36:28 crc kubenswrapper[4800]: I1125 17:36:28.786349 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
Nov 25 17:36:28 crc kubenswrapper[4800]: E1125 17:36:28.787394 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 17:36:30 crc kubenswrapper[4800]: I1125 17:36:30.959253 4800 generic.go:334] "Generic (PLEG): container finished" podID="1b0af985-22c6-472c-99fd-aa42bee61e14" containerID="dcbb1dd23c6378dce6aa3c13bc4e0d330430728ebfd870bbbddb000af4542af3" exitCode=0
Nov 25 17:36:30 crc kubenswrapper[4800]: I1125 17:36:30.959971 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s00-podified-functional" event={"ID":"1b0af985-22c6-472c-99fd-aa42bee61e14","Type":"ContainerDied","Data":"dcbb1dd23c6378dce6aa3c13bc4e0d330430728ebfd870bbbddb000af4542af3"}
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.631472 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tobiko-tests-tobiko-s00-podified-functional"
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.733808 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.733881 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-private-key\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.733923 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-workdir\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.733984 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-config\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734013 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ceph\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734130 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-clouds-config\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734159 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ca-certs\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734181 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-temporary\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") "
Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734225 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-openstack-config-secret\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734269 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-public-key\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734315 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzq6n\" (UniqueName: \"kubernetes.io/projected/1b0af985-22c6-472c-99fd-aa42bee61e14-kube-api-access-qzq6n\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.734341 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-kubeconfig\") pod \"1b0af985-22c6-472c-99fd-aa42bee61e14\" (UID: \"1b0af985-22c6-472c-99fd-aa42bee61e14\") " Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.735983 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.744143 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ceph" (OuterVolumeSpecName: "ceph") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.752663 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b0af985-22c6-472c-99fd-aa42bee61e14-kube-api-access-qzq6n" (OuterVolumeSpecName: "kube-api-access-qzq6n") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "kube-api-access-qzq6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.755357 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "test-operator-logs") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.772566 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tobiko-tests-tobiko-s01-sanity"] Nov 25 17:36:32 crc kubenswrapper[4800]: E1125 17:36:32.773331 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b0af985-22c6-472c-99fd-aa42bee61e14" containerName="tobiko-tests-tobiko" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.773362 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b0af985-22c6-472c-99fd-aa42bee61e14" containerName="tobiko-tests-tobiko" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.773689 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b0af985-22c6-472c-99fd-aa42bee61e14" containerName="tobiko-tests-tobiko" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.777035 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.792142 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tobiko-tests-tobiko-s01-sanity"] Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.800001 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-public-key" (OuterVolumeSpecName: "tobiko-public-key") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "tobiko-public-key". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.803057 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-config" (OuterVolumeSpecName: "tobiko-config") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "tobiko-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.807545 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-private-key" (OuterVolumeSpecName: "tobiko-private-key") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "tobiko-private-key". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.807743 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-kubeconfig" (OuterVolumeSpecName: "kubeconfig") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "kubeconfig". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.810346 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.830587 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837501 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-clouds-config" (OuterVolumeSpecName: "test-operator-clouds-config") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "test-operator-clouds-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837661 4800 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837725 4800 reconciler_common.go:293] "Volume detached for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-private-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837739 4800 reconciler_common.go:293] "Volume detached for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837751 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837759 4800 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837767 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837798 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837808 4800 reconciler_common.go:293] "Volume detached for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-tobiko-public-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837818 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzq6n\" (UniqueName: \"kubernetes.io/projected/1b0af985-22c6-472c-99fd-aa42bee61e14-kube-api-access-qzq6n\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.837827 4800 reconciler_common.go:293] "Volume detached for volume \"kubeconfig\" (UniqueName: 
\"kubernetes.io/secret/1b0af985-22c6-472c-99fd-aa42bee61e14-kubeconfig\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.858796 4800 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.938757 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-workdir\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.938803 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-public-key\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.938831 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.938871 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ceph\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.938940 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-openstack-config-secret\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.938998 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939003 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-kubeconfig\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939085 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-clouds-config\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: 
\"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939105 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-temporary\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939124 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ca-certs\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939309 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-private-key\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939435 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khstz\" (UniqueName: \"kubernetes.io/projected/b25425bd-52d1-42fe-837a-99f02547084f-kube-api-access-khstz\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939526 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-config\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.939696 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-clouds-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.963936 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.992528 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s00-podified-functional" event={"ID":"1b0af985-22c6-472c-99fd-aa42bee61e14","Type":"ContainerDied","Data":"b45c725526d34ab2002737d52c787b076e99b7195ca236e034f5367e0e2031f1"} Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.992588 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b45c725526d34ab2002737d52c787b076e99b7195ca236e034f5367e0e2031f1" Nov 25 17:36:32 crc kubenswrapper[4800]: I1125 17:36:32.992605 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tobiko-tests-tobiko-s00-podified-functional" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041357 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-kubeconfig\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041434 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-clouds-config\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041458 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-temporary\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041478 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ca-certs\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041527 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-private-key\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041572 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khstz\" (UniqueName: \"kubernetes.io/projected/b25425bd-52d1-42fe-837a-99f02547084f-kube-api-access-khstz\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041614 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-config\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041677 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-workdir\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041706 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tobiko-public-key\" (UniqueName: 
\"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-public-key\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041727 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ceph\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.041788 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-openstack-config-secret\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.042936 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-workdir\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.043372 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-temporary\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.043815 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-config\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.043893 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-public-key\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.044140 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-private-key\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.044283 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-clouds-config\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.046446 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-openstack-config-secret\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.046799 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ca-certs\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.046883 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-kubeconfig\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.047415 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ceph\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.061622 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khstz\" (UniqueName: \"kubernetes.io/projected/b25425bd-52d1-42fe-837a-99f02547084f-kube-api-access-khstz\") pod \"tobiko-tests-tobiko-s01-sanity\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.220548 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:36:33 crc kubenswrapper[4800]: I1125 17:36:33.855766 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tobiko-tests-tobiko-s01-sanity"] Nov 25 17:36:34 crc kubenswrapper[4800]: I1125 17:36:34.004767 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s01-sanity" event={"ID":"b25425bd-52d1-42fe-837a-99f02547084f","Type":"ContainerStarted","Data":"6b6d874b8c5d05c6a912b6a0d457f5250fa757488f22ca3dd867f09d8b73475a"} Nov 25 17:36:34 crc kubenswrapper[4800]: I1125 17:36:34.179239 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "1b0af985-22c6-472c-99fd-aa42bee61e14" (UID: "1b0af985-22c6-472c-99fd-aa42bee61e14"). InnerVolumeSpecName "test-operator-ephemeral-workdir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:34 crc kubenswrapper[4800]: I1125 17:36:34.271544 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/1b0af985-22c6-472c-99fd-aa42bee61e14-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:40 crc kubenswrapper[4800]: I1125 17:36:40.075317 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s01-sanity" event={"ID":"b25425bd-52d1-42fe-837a-99f02547084f","Type":"ContainerStarted","Data":"0754cd847b364d806a58a3d511bcf8fd667f02bf74fb10b4d5177646324223a2"} Nov 25 17:36:40 crc kubenswrapper[4800]: I1125 17:36:40.124320 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tobiko-tests-tobiko-s01-sanity" podStartSLOduration=8.124292235 podStartE2EDuration="8.124292235s" podCreationTimestamp="2025-11-25 17:36:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:36:40.105926824 +0000 UTC m=+8361.160335346" watchObservedRunningTime="2025-11-25 17:36:40.124292235 +0000 UTC m=+8361.178700747" Nov 25 17:36:42 crc kubenswrapper[4800]: I1125 17:36:42.785703 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" Nov 25 17:36:42 crc kubenswrapper[4800]: E1125 17:36:42.787000 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:36:55 crc kubenswrapper[4800]: I1125 17:36:55.786151 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" Nov 25 17:36:55 crc kubenswrapper[4800]: E1125 17:36:55.787200 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:37:07 crc kubenswrapper[4800]: I1125 17:37:07.785288 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" Nov 25 17:37:07 crc kubenswrapper[4800]: E1125 17:37:07.785955 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:37:20 crc kubenswrapper[4800]: I1125 17:37:20.785807 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" Nov 25 17:37:20 crc kubenswrapper[4800]: E1125 17:37:20.786672 4800 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:37:31 crc kubenswrapper[4800]: I1125 17:37:31.785759 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" Nov 25 17:37:31 crc kubenswrapper[4800]: E1125 17:37:31.786866 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:37:46 crc kubenswrapper[4800]: I1125 17:37:46.786023 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2" Nov 25 17:37:47 crc kubenswrapper[4800]: I1125 17:37:47.849174 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"078b57d8a5f5024f1311b46f754707d63dba82cd527688596873eddf4031ce8d"} Nov 25 17:38:08 crc kubenswrapper[4800]: I1125 17:38:08.063150 4800 generic.go:334] "Generic (PLEG): container finished" podID="b25425bd-52d1-42fe-837a-99f02547084f" containerID="0754cd847b364d806a58a3d511bcf8fd667f02bf74fb10b4d5177646324223a2" exitCode=0 Nov 25 17:38:08 crc kubenswrapper[4800]: I1125 17:38:08.063245 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s01-sanity" event={"ID":"b25425bd-52d1-42fe-837a-99f02547084f","Type":"ContainerDied","Data":"0754cd847b364d806a58a3d511bcf8fd667f02bf74fb10b4d5177646324223a2"} Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.613275 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.718925 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-kubeconfig\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719049 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-clouds-config\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719089 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ceph\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719172 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-config\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719218 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719248 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-private-key\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719303 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-openstack-config-secret\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719346 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khstz\" (UniqueName: \"kubernetes.io/projected/b25425bd-52d1-42fe-837a-99f02547084f-kube-api-access-khstz\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719388 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-temporary\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719513 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ca-certs\") pod 
\"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719541 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-public-key\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.719568 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-workdir\") pod \"b25425bd-52d1-42fe-837a-99f02547084f\" (UID: \"b25425bd-52d1-42fe-837a-99f02547084f\") " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.720521 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.726013 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "test-operator-logs") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.726462 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b25425bd-52d1-42fe-837a-99f02547084f-kube-api-access-khstz" (OuterVolumeSpecName: "kube-api-access-khstz") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "kube-api-access-khstz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.741049 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ceph" (OuterVolumeSpecName: "ceph") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.769240 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-kubeconfig" (OuterVolumeSpecName: "kubeconfig") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "kubeconfig". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.769821 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.792649 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-config" (OuterVolumeSpecName: "tobiko-config") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "tobiko-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.796528 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-public-key" (OuterVolumeSpecName: "tobiko-public-key") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "tobiko-public-key". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.805600 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-private-key" (OuterVolumeSpecName: "tobiko-private-key") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "tobiko-private-key". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.807540 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.812433 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-clouds-config" (OuterVolumeSpecName: "test-operator-clouds-config") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "test-operator-clouds-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.822952 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-clouds-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-clouds-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.822994 4800 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823008 4800 reconciler_common.go:293] "Volume detached for volume \"tobiko-config\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823041 4800 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823055 4800 reconciler_common.go:293] "Volume detached for volume \"tobiko-private-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-private-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823068 4800 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823080 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khstz\" (UniqueName: \"kubernetes.io/projected/b25425bd-52d1-42fe-837a-99f02547084f-kube-api-access-khstz\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823094 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823105 4800 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823118 4800 reconciler_common.go:293] "Volume detached for volume \"tobiko-public-key\" (UniqueName: \"kubernetes.io/configmap/b25425bd-52d1-42fe-837a-99f02547084f-tobiko-public-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.823134 4800 reconciler_common.go:293] "Volume detached for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/secret/b25425bd-52d1-42fe-837a-99f02547084f-kubeconfig\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.843699 4800 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 25 17:38:09 crc kubenswrapper[4800]: I1125 17:38:09.924333 4800 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:10 crc 
kubenswrapper[4800]: I1125 17:38:10.094148 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tobiko-tests-tobiko-s01-sanity" event={"ID":"b25425bd-52d1-42fe-837a-99f02547084f","Type":"ContainerDied","Data":"6b6d874b8c5d05c6a912b6a0d457f5250fa757488f22ca3dd867f09d8b73475a"} Nov 25 17:38:10 crc kubenswrapper[4800]: I1125 17:38:10.094265 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b6d874b8c5d05c6a912b6a0d457f5250fa757488f22ca3dd867f09d8b73475a" Nov 25 17:38:10 crc kubenswrapper[4800]: I1125 17:38:10.094537 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tobiko-tests-tobiko-s01-sanity" Nov 25 17:38:11 crc kubenswrapper[4800]: I1125 17:38:11.399222 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "b25425bd-52d1-42fe-837a-99f02547084f" (UID: "b25425bd-52d1-42fe-837a-99f02547084f"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:38:11 crc kubenswrapper[4800]: I1125 17:38:11.456616 4800 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b25425bd-52d1-42fe-837a-99f02547084f-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.655080 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko"] Nov 25 17:38:15 crc kubenswrapper[4800]: E1125 17:38:15.656815 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b25425bd-52d1-42fe-837a-99f02547084f" containerName="tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.656877 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b25425bd-52d1-42fe-837a-99f02547084f" containerName="tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.657304 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b25425bd-52d1-42fe-837a-99f02547084f" containerName="tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.658470 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.663975 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko"] Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.751685 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tobiko-tobiko-tests-tobiko\" (UID: \"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02\") " pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.751902 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcpv8\" (UniqueName: \"kubernetes.io/projected/7e8bfbb6-72be-47cb-bde0-3fc5d7264e02-kube-api-access-vcpv8\") pod \"test-operator-logs-pod-tobiko-tobiko-tests-tobiko\" (UID: \"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02\") " pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.854181 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcpv8\" (UniqueName: \"kubernetes.io/projected/7e8bfbb6-72be-47cb-bde0-3fc5d7264e02-kube-api-access-vcpv8\") pod \"test-operator-logs-pod-tobiko-tobiko-tests-tobiko\" (UID: \"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02\") " pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.854269 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tobiko-tobiko-tests-tobiko\" (UID: \"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02\") " pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.854782 4800 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tobiko-tobiko-tests-tobiko\" (UID: \"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.877539 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcpv8\" (UniqueName: \"kubernetes.io/projected/7e8bfbb6-72be-47cb-bde0-3fc5d7264e02-kube-api-access-vcpv8\") pod \"test-operator-logs-pod-tobiko-tobiko-tests-tobiko\" (UID: \"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02\") " pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:15 crc kubenswrapper[4800]: I1125 17:38:15.879770 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tobiko-tobiko-tests-tobiko\" (UID: \"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02\") " pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:16 crc kubenswrapper[4800]: I1125 17:38:16.020083 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" Nov 25 17:38:16 crc kubenswrapper[4800]: I1125 17:38:16.544488 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko"] Nov 25 17:38:17 crc kubenswrapper[4800]: I1125 17:38:17.211252 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" event={"ID":"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02","Type":"ContainerStarted","Data":"47b90f71be2f6ed94202ff44cbfcb20f9b07403531b42457b3440becc8e050bd"} Nov 25 17:38:18 crc kubenswrapper[4800]: I1125 17:38:18.225074 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" event={"ID":"7e8bfbb6-72be-47cb-bde0-3fc5d7264e02","Type":"ContainerStarted","Data":"8fc72f305964b6d12c5fe75a60a834b4ab2aad74adbcc4d61821cf4313735a02"} Nov 25 17:38:18 crc kubenswrapper[4800]: I1125 17:38:18.262108 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tobiko-tobiko-tests-tobiko" podStartSLOduration=2.782807335 podStartE2EDuration="3.262073212s" podCreationTimestamp="2025-11-25 17:38:15 +0000 UTC" firstStartedPulling="2025-11-25 17:38:16.54523611 +0000 UTC m=+8457.599644602" lastFinishedPulling="2025-11-25 17:38:17.024501987 +0000 UTC m=+8458.078910479" observedRunningTime="2025-11-25 17:38:18.246313183 +0000 UTC m=+8459.300721675" watchObservedRunningTime="2025-11-25 17:38:18.262073212 +0000 UTC m=+8459.316481744" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.598071 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vw8gc"] Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.616499 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.633500 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vw8gc"] Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.740027 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-utilities\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.740735 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-catalog-content\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.740901 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvn9n\" (UniqueName: \"kubernetes.io/projected/5ce400fd-2b6b-45ad-a200-e2c284793cb1-kube-api-access-zvn9n\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.843539 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-catalog-content\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.843619 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvn9n\" (UniqueName: \"kubernetes.io/projected/5ce400fd-2b6b-45ad-a200-e2c284793cb1-kube-api-access-zvn9n\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.843686 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-utilities\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.844444 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-utilities\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.844561 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-catalog-content\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.869774 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zvn9n\" (UniqueName: \"kubernetes.io/projected/5ce400fd-2b6b-45ad-a200-e2c284793cb1-kube-api-access-zvn9n\") pod \"community-operators-vw8gc\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") " pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:15 crc kubenswrapper[4800]: I1125 17:39:15.959627 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vw8gc" Nov 25 17:39:16 crc kubenswrapper[4800]: I1125 17:39:16.497060 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vw8gc"] Nov 25 17:39:16 crc kubenswrapper[4800]: I1125 17:39:16.885293 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vw8gc" event={"ID":"5ce400fd-2b6b-45ad-a200-e2c284793cb1","Type":"ContainerStarted","Data":"c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519"} Nov 25 17:39:16 crc kubenswrapper[4800]: I1125 17:39:16.887077 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vw8gc" event={"ID":"5ce400fd-2b6b-45ad-a200-e2c284793cb1","Type":"ContainerStarted","Data":"f4a14c795a46c645e2ee54fb87f8dfb2a841856f3da09be807f54ea9419c39e8"} Nov 25 17:39:17 crc kubenswrapper[4800]: I1125 17:39:17.900147 4800 generic.go:334] "Generic (PLEG): container finished" podID="5ce400fd-2b6b-45ad-a200-e2c284793cb1" containerID="c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519" exitCode=0 Nov 25 17:39:17 crc kubenswrapper[4800]: I1125 17:39:17.900203 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vw8gc" event={"ID":"5ce400fd-2b6b-45ad-a200-e2c284793cb1","Type":"ContainerDied","Data":"c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519"} Nov 25 17:39:19 crc kubenswrapper[4800]: I1125 17:39:19.922977 4800 generic.go:334] "Generic (PLEG): container finished" podID="5ce400fd-2b6b-45ad-a200-e2c284793cb1" containerID="288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c" exitCode=0 Nov 25 17:39:19 crc kubenswrapper[4800]: I1125 17:39:19.923095 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vw8gc" event={"ID":"5ce400fd-2b6b-45ad-a200-e2c284793cb1","Type":"ContainerDied","Data":"288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c"} Nov 25 17:39:21 crc kubenswrapper[4800]: I1125 17:39:21.945467 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vw8gc" event={"ID":"5ce400fd-2b6b-45ad-a200-e2c284793cb1","Type":"ContainerStarted","Data":"4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac"} Nov 25 17:39:21 crc kubenswrapper[4800]: I1125 17:39:21.974033 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vw8gc" podStartSLOduration=3.812261137 podStartE2EDuration="6.974014854s" podCreationTimestamp="2025-11-25 17:39:15 +0000 UTC" firstStartedPulling="2025-11-25 17:39:17.904251068 +0000 UTC m=+8518.958659560" lastFinishedPulling="2025-11-25 17:39:21.066004755 +0000 UTC m=+8522.120413277" observedRunningTime="2025-11-25 17:39:21.97059575 +0000 UTC m=+8523.025004242" watchObservedRunningTime="2025-11-25 17:39:21.974014854 +0000 UTC m=+8523.028423346" Nov 25 17:39:23 crc kubenswrapper[4800]: E1125 17:39:23.091032 4800 cadvisor_stats_provider.go:516] "Partial failure 
Nov 25 17:39:23 crc kubenswrapper[4800]: E1125 17:39:23.091032 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ce400fd_2b6b_45ad_a200_e2c284793cb1.slice/crio-conmon-288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 17:39:25 crc kubenswrapper[4800]: I1125 17:39:25.960580 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vw8gc"
Nov 25 17:39:25 crc kubenswrapper[4800]: I1125 17:39:25.961204 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vw8gc"
Nov 25 17:39:26 crc kubenswrapper[4800]: I1125 17:39:26.038977 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vw8gc"
Nov 25 17:39:26 crc kubenswrapper[4800]: I1125 17:39:26.096223 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vw8gc"
Nov 25 17:39:26 crc kubenswrapper[4800]: I1125 17:39:26.277875 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vw8gc"]
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.028947 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vw8gc" podUID="5ce400fd-2b6b-45ad-a200-e2c284793cb1" containerName="registry-server" containerID="cri-o://4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac" gracePeriod=2
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.514213 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vw8gc"
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.628904 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvn9n\" (UniqueName: \"kubernetes.io/projected/5ce400fd-2b6b-45ad-a200-e2c284793cb1-kube-api-access-zvn9n\") pod \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") "
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.629289 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-utilities\") pod \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") "
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.629381 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-catalog-content\") pod \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\" (UID: \"5ce400fd-2b6b-45ad-a200-e2c284793cb1\") "
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.630280 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-utilities" (OuterVolumeSpecName: "utilities") pod "5ce400fd-2b6b-45ad-a200-e2c284793cb1" (UID: "5ce400fd-2b6b-45ad-a200-e2c284793cb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.635978 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ce400fd-2b6b-45ad-a200-e2c284793cb1-kube-api-access-zvn9n" (OuterVolumeSpecName: "kube-api-access-zvn9n") pod "5ce400fd-2b6b-45ad-a200-e2c284793cb1" (UID: "5ce400fd-2b6b-45ad-a200-e2c284793cb1"). InnerVolumeSpecName "kube-api-access-zvn9n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.714130 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ce400fd-2b6b-45ad-a200-e2c284793cb1" (UID: "5ce400fd-2b6b-45ad-a200-e2c284793cb1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.732712 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.732759 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce400fd-2b6b-45ad-a200-e2c284793cb1-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:39:28 crc kubenswrapper[4800]: I1125 17:39:28.732776 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvn9n\" (UniqueName: \"kubernetes.io/projected/5ce400fd-2b6b-45ad-a200-e2c284793cb1-kube-api-access-zvn9n\") on node \"crc\" DevicePath \"\""
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.044731 4800 generic.go:334] "Generic (PLEG): container finished" podID="5ce400fd-2b6b-45ad-a200-e2c284793cb1" containerID="4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac" exitCode=0
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.044805 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vw8gc" event={"ID":"5ce400fd-2b6b-45ad-a200-e2c284793cb1","Type":"ContainerDied","Data":"4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac"}
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.044899 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vw8gc" event={"ID":"5ce400fd-2b6b-45ad-a200-e2c284793cb1","Type":"ContainerDied","Data":"f4a14c795a46c645e2ee54fb87f8dfb2a841856f3da09be807f54ea9419c39e8"}
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.044941 4800 scope.go:117] "RemoveContainer" containerID="4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.045161 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vw8gc"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.078175 4800 scope.go:117] "RemoveContainer" containerID="288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.113954 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vw8gc"]
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.129736 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vw8gc"]
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.136576 4800 scope.go:117] "RemoveContainer" containerID="c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.188998 4800 scope.go:117] "RemoveContainer" containerID="4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac"
Nov 25 17:39:29 crc kubenswrapper[4800]: E1125 17:39:29.189474 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac\": container with ID starting with 4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac not found: ID does not exist" containerID="4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.189532 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac"} err="failed to get container status \"4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac\": rpc error: code = NotFound desc = could not find container \"4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac\": container with ID starting with 4fb722ba987784ad70842e330b67d940932e9508ea05f7d677765be581cf7bac not found: ID does not exist"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.189563 4800 scope.go:117] "RemoveContainer" containerID="288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c"
Nov 25 17:39:29 crc kubenswrapper[4800]: E1125 17:39:29.191370 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c\": container with ID starting with 288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c not found: ID does not exist" containerID="288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.191412 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c"} err="failed to get container status \"288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c\": rpc error: code = NotFound desc = could not find container \"288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c\": container with ID starting with 288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c not found: ID does not exist"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.191439 4800 scope.go:117] "RemoveContainer" containerID="c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519"
Nov 25 17:39:29 crc kubenswrapper[4800]: E1125 17:39:29.191978 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519\": container with ID starting with c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519 not found: ID does not exist" containerID="c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.192020 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519"} err="failed to get container status \"c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519\": rpc error: code = NotFound desc = could not find container \"c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519\": container with ID starting with c416900c94dc3cfb3e7f2f82f4e0b4c766d4eb94909b90ac419ec4dae046d519 not found: ID does not exist"
Nov 25 17:39:29 crc kubenswrapper[4800]: I1125 17:39:29.805642 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ce400fd-2b6b-45ad-a200-e2c284793cb1" path="/var/lib/kubelet/pods/5ce400fd-2b6b-45ad-a200-e2c284793cb1/volumes"
podUID="5ce400fd-2b6b-45ad-a200-e2c284793cb1" containerName="registry-server" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.549346 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.559775 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mgtct"] Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.631464 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-utilities\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.631671 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-catalog-content\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.632120 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnxcj\" (UniqueName: \"kubernetes.io/projected/905b56fc-93a1-40ba-935b-c8d56c99fefa-kube-api-access-mnxcj\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.734194 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnxcj\" (UniqueName: \"kubernetes.io/projected/905b56fc-93a1-40ba-935b-c8d56c99fefa-kube-api-access-mnxcj\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.734289 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-utilities\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.734370 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-catalog-content\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.734899 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-utilities\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.735015 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-catalog-content\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " 
pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.775228 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnxcj\" (UniqueName: \"kubernetes.io/projected/905b56fc-93a1-40ba-935b-c8d56c99fefa-kube-api-access-mnxcj\") pod \"redhat-operators-mgtct\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") " pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: I1125 17:39:53.903872 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:39:53 crc kubenswrapper[4800]: E1125 17:39:53.915473 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ce400fd_2b6b_45ad_a200_e2c284793cb1.slice/crio-conmon-288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:39:54 crc kubenswrapper[4800]: I1125 17:39:54.386084 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mgtct"] Nov 25 17:39:55 crc kubenswrapper[4800]: I1125 17:39:55.356148 4800 generic.go:334] "Generic (PLEG): container finished" podID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerID="c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756" exitCode=0 Nov 25 17:39:55 crc kubenswrapper[4800]: I1125 17:39:55.356816 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mgtct" event={"ID":"905b56fc-93a1-40ba-935b-c8d56c99fefa","Type":"ContainerDied","Data":"c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756"} Nov 25 17:39:55 crc kubenswrapper[4800]: I1125 17:39:55.356891 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mgtct" event={"ID":"905b56fc-93a1-40ba-935b-c8d56c99fefa","Type":"ContainerStarted","Data":"bda377cb6cb4894cedeaea52b2387844d4686b3d42335a0a3bcef724ca9d0597"} Nov 25 17:39:55 crc kubenswrapper[4800]: I1125 17:39:55.362997 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:39:57 crc kubenswrapper[4800]: I1125 17:39:57.381173 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mgtct" event={"ID":"905b56fc-93a1-40ba-935b-c8d56c99fefa","Type":"ContainerStarted","Data":"539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3"} Nov 25 17:39:58 crc kubenswrapper[4800]: I1125 17:39:58.396098 4800 generic.go:334] "Generic (PLEG): container finished" podID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerID="539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3" exitCode=0 Nov 25 17:39:58 crc kubenswrapper[4800]: I1125 17:39:58.396182 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mgtct" event={"ID":"905b56fc-93a1-40ba-935b-c8d56c99fefa","Type":"ContainerDied","Data":"539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3"} Nov 25 17:40:00 crc kubenswrapper[4800]: I1125 17:40:00.424078 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mgtct" event={"ID":"905b56fc-93a1-40ba-935b-c8d56c99fefa","Type":"ContainerStarted","Data":"f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03"} Nov 25 17:40:00 crc kubenswrapper[4800]: I1125 
17:40:00.449732 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mgtct" podStartSLOduration=3.104504217 podStartE2EDuration="7.449706556s" podCreationTimestamp="2025-11-25 17:39:53 +0000 UTC" firstStartedPulling="2025-11-25 17:39:55.362438051 +0000 UTC m=+8556.416846553" lastFinishedPulling="2025-11-25 17:39:59.70764041 +0000 UTC m=+8560.762048892" observedRunningTime="2025-11-25 17:40:00.44726367 +0000 UTC m=+8561.501672152" watchObservedRunningTime="2025-11-25 17:40:00.449706556 +0000 UTC m=+8561.504115078" Nov 25 17:40:03 crc kubenswrapper[4800]: I1125 17:40:03.905696 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:40:03 crc kubenswrapper[4800]: I1125 17:40:03.906538 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:40:04 crc kubenswrapper[4800]: E1125 17:40:04.172889 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ce400fd_2b6b_45ad_a200_e2c284793cb1.slice/crio-conmon-288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:40:04 crc kubenswrapper[4800]: I1125 17:40:04.998013 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mgtct" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="registry-server" probeResult="failure" output=< Nov 25 17:40:04 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 17:40:04 crc kubenswrapper[4800]: > Nov 25 17:40:12 crc kubenswrapper[4800]: I1125 17:40:12.639866 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:40:12 crc kubenswrapper[4800]: I1125 17:40:12.640420 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:40:13 crc kubenswrapper[4800]: I1125 17:40:13.978767 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:40:14 crc kubenswrapper[4800]: I1125 17:40:14.085471 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mgtct" Nov 25 17:40:14 crc kubenswrapper[4800]: I1125 17:40:14.232028 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mgtct"] Nov 25 17:40:14 crc kubenswrapper[4800]: E1125 17:40:14.463079 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ce400fd_2b6b_45ad_a200_e2c284793cb1.slice/crio-conmon-288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:40:15 crc kubenswrapper[4800]: I1125 
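The multi-line probe output above is characteristic of a gRPC health check run against the registry-server port with a one-second budget; the pod is still unpacking catalog content, so nothing is listening yet. A minimal Go sketch of such a check against the grpc.health.v1 service; the address and the 1s timeout come from the log, everything else is an illustrative client, not the probe binary the pod actually runs:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"google.golang.org/grpc"
    	healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()

    	// Block on connecting to the health port; give up after the 1s budget.
    	conn, err := grpc.DialContext(ctx, "localhost:50051", grpc.WithInsecure(), grpc.WithBlock())
    	if err != nil {
    		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
    		return
    	}
    	defer conn.Close()

    	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
    	if err != nil {
    		fmt.Println("health rpc failed:", err)
    		return
    	}
    	fmt.Println("status:", resp.Status) // SERVING once the registry is up
    }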
Nov 25 17:40:12 crc kubenswrapper[4800]: I1125 17:40:12.639866 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:40:12 crc kubenswrapper[4800]: I1125 17:40:12.640420 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:40:13 crc kubenswrapper[4800]: I1125 17:40:13.978767 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mgtct"
Nov 25 17:40:14 crc kubenswrapper[4800]: I1125 17:40:14.085471 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mgtct"
Nov 25 17:40:14 crc kubenswrapper[4800]: I1125 17:40:14.232028 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mgtct"]
Nov 25 17:40:14 crc kubenswrapper[4800]: E1125 17:40:14.463079 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ce400fd_2b6b_45ad_a200_e2c284793cb1.slice/crio-conmon-288c8e455e3b22609e55ec17f8dcf96fb31976638cb68e2d2e18a9fbffa7e40c.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 17:40:15 crc kubenswrapper[4800]: I1125 17:40:15.608244 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mgtct" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="registry-server" containerID="cri-o://f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03" gracePeriod=2
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.064588 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mgtct"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.144095 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnxcj\" (UniqueName: \"kubernetes.io/projected/905b56fc-93a1-40ba-935b-c8d56c99fefa-kube-api-access-mnxcj\") pod \"905b56fc-93a1-40ba-935b-c8d56c99fefa\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") "
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.144236 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-catalog-content\") pod \"905b56fc-93a1-40ba-935b-c8d56c99fefa\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") "
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.144300 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-utilities\") pod \"905b56fc-93a1-40ba-935b-c8d56c99fefa\" (UID: \"905b56fc-93a1-40ba-935b-c8d56c99fefa\") "
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.145426 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-utilities" (OuterVolumeSpecName: "utilities") pod "905b56fc-93a1-40ba-935b-c8d56c99fefa" (UID: "905b56fc-93a1-40ba-935b-c8d56c99fefa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.158486 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/905b56fc-93a1-40ba-935b-c8d56c99fefa-kube-api-access-mnxcj" (OuterVolumeSpecName: "kube-api-access-mnxcj") pod "905b56fc-93a1-40ba-935b-c8d56c99fefa" (UID: "905b56fc-93a1-40ba-935b-c8d56c99fefa"). InnerVolumeSpecName "kube-api-access-mnxcj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.240636 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "905b56fc-93a1-40ba-935b-c8d56c99fefa" (UID: "905b56fc-93a1-40ba-935b-c8d56c99fefa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.246539 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.246568 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905b56fc-93a1-40ba-935b-c8d56c99fefa-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.246578 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnxcj\" (UniqueName: \"kubernetes.io/projected/905b56fc-93a1-40ba-935b-c8d56c99fefa-kube-api-access-mnxcj\") on node \"crc\" DevicePath \"\""
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.621017 4800 generic.go:334] "Generic (PLEG): container finished" podID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerID="f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03" exitCode=0
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.621115 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mgtct"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.621153 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mgtct" event={"ID":"905b56fc-93a1-40ba-935b-c8d56c99fefa","Type":"ContainerDied","Data":"f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03"}
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.621550 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mgtct" event={"ID":"905b56fc-93a1-40ba-935b-c8d56c99fefa","Type":"ContainerDied","Data":"bda377cb6cb4894cedeaea52b2387844d4686b3d42335a0a3bcef724ca9d0597"}
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.621586 4800 scope.go:117] "RemoveContainer" containerID="f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.662038 4800 scope.go:117] "RemoveContainer" containerID="539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.671992 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mgtct"]
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.680615 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mgtct"]
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.691961 4800 scope.go:117] "RemoveContainer" containerID="c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.732338 4800 scope.go:117] "RemoveContainer" containerID="f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03"
Nov 25 17:40:16 crc kubenswrapper[4800]: E1125 17:40:16.732886 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03\": container with ID starting with f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03 not found: ID does not exist" containerID="f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.732961 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03"} err="failed to get container status \"f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03\": rpc error: code = NotFound desc = could not find container \"f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03\": container with ID starting with f2f386b82f7131c568f36c06908646587133bce343627101d83186d2518c7b03 not found: ID does not exist"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.733004 4800 scope.go:117] "RemoveContainer" containerID="539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3"
Nov 25 17:40:16 crc kubenswrapper[4800]: E1125 17:40:16.733417 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3\": container with ID starting with 539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3 not found: ID does not exist" containerID="539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.733471 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3"} err="failed to get container status \"539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3\": rpc error: code = NotFound desc = could not find container \"539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3\": container with ID starting with 539d93be8ce2af8e487f68127af3fbc4b90e23db8ead633b25693c14164fa6d3 not found: ID does not exist"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.733506 4800 scope.go:117] "RemoveContainer" containerID="c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756"
Nov 25 17:40:16 crc kubenswrapper[4800]: E1125 17:40:16.733926 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756\": container with ID starting with c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756 not found: ID does not exist" containerID="c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756"
Nov 25 17:40:16 crc kubenswrapper[4800]: I1125 17:40:16.733976 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756"} err="failed to get container status \"c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756\": rpc error: code = NotFound desc = could not find container \"c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756\": container with ID starting with c0ebe01b0d4b72041972511af5e345dcf27d05fee9ca87c1ca4f511c183f9756 not found: ID does not exist"
Nov 25 17:40:17 crc kubenswrapper[4800]: I1125 17:40:17.810556 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" path="/var/lib/kubelet/pods/905b56fc-93a1-40ba-935b-c8d56c99fefa/volumes"
Nov 25 17:40:42 crc kubenswrapper[4800]: I1125 17:40:42.640474 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:40:42 crc kubenswrapper[4800]: I1125 17:40:42.641323 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:41:12 crc kubenswrapper[4800]: I1125 17:41:12.639691 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:41:12 crc kubenswrapper[4800]: I1125 17:41:12.640391 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:41:12 crc kubenswrapper[4800]: I1125 17:41:12.640449 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 17:41:12 crc kubenswrapper[4800]: I1125 17:41:12.641320 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"078b57d8a5f5024f1311b46f754707d63dba82cd527688596873eddf4031ce8d"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 17:41:12 crc kubenswrapper[4800]: I1125 17:41:12.641399 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://078b57d8a5f5024f1311b46f754707d63dba82cd527688596873eddf4031ce8d" gracePeriod=600
Nov 25 17:41:13 crc kubenswrapper[4800]: I1125 17:41:13.320456 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="078b57d8a5f5024f1311b46f754707d63dba82cd527688596873eddf4031ce8d" exitCode=0
Nov 25 17:41:13 crc kubenswrapper[4800]: I1125 17:41:13.320511 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"078b57d8a5f5024f1311b46f754707d63dba82cd527688596873eddf4031ce8d"}
Nov 25 17:41:13 crc kubenswrapper[4800]: I1125 17:41:13.321017 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09"}
Nov 25 17:41:13 crc kubenswrapper[4800]: I1125 17:41:13.321043 4800 scope.go:117] "RemoveContainer" containerID="e32ca763a9f7f21010f6114cc8c60393ceb2ca3359a53a59f467eadbf1730bc2"
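The machine-config-daemon liveness failures above are plain HTTP GETs that never get past the TCP connect; once the failure threshold is crossed the kubelet kills the container with the configured grace period (600s here) and restarts it. A Go sketch of an equivalent check; the URL is taken from the log, the client code is illustrative rather than the kubelet's prober:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	client := &http.Client{Timeout: time.Second}

    	// Same endpoint the kubelet probes for machine-config-daemon.
    	resp, err := client.Get("http://127.0.0.1:8798/health")
    	if err != nil {
    		// e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
    		fmt.Println("Liveness probe status=failure output:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("Liveness probe status:", resp.Status)
    }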
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.232155 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z2qfj"]
Nov 25 17:41:48 crc kubenswrapper[4800]: E1125 17:41:48.233382 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="extract-content"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.233404 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="extract-content"
Nov 25 17:41:48 crc kubenswrapper[4800]: E1125 17:41:48.233426 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="extract-utilities"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.233439 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="extract-utilities"
Nov 25 17:41:48 crc kubenswrapper[4800]: E1125 17:41:48.233471 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="registry-server"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.233485 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="registry-server"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.233830 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="905b56fc-93a1-40ba-935b-c8d56c99fefa" containerName="registry-server"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.236505 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.240480 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z2qfj"]
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.416442 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-utilities\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.416602 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-catalog-content\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.416646 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58gzk\" (UniqueName: \"kubernetes.io/projected/344e77b2-719f-4e7f-b6dc-f68e02dbf240-kube-api-access-58gzk\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.431972 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fqh8v"]
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.434232 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.451949 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqh8v"]
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.519201 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58gzk\" (UniqueName: \"kubernetes.io/projected/344e77b2-719f-4e7f-b6dc-f68e02dbf240-kube-api-access-58gzk\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.519261 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-catalog-content\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.519491 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-utilities\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.520225 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-utilities\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.520312 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-catalog-content\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.553033 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58gzk\" (UniqueName: \"kubernetes.io/projected/344e77b2-719f-4e7f-b6dc-f68e02dbf240-kube-api-access-58gzk\") pod \"certified-operators-z2qfj\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.593703 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z2qfj"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.621494 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-utilities\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.621717 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjlps\" (UniqueName: \"kubernetes.io/projected/91a8fb54-0ae5-4d8d-9a67-bd003820f573-kube-api-access-jjlps\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.622201 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-catalog-content\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.724152 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjlps\" (UniqueName: \"kubernetes.io/projected/91a8fb54-0ae5-4d8d-9a67-bd003820f573-kube-api-access-jjlps\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.724297 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-catalog-content\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.724388 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-utilities\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.725083 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-utilities\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.725260 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-catalog-content\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:48 crc kubenswrapper[4800]: I1125 17:41:48.760832 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjlps\" (UniqueName: \"kubernetes.io/projected/91a8fb54-0ae5-4d8d-9a67-bd003820f573-kube-api-access-jjlps\") pod \"redhat-marketplace-fqh8v\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:49 crc kubenswrapper[4800]: I1125 17:41:49.051098 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqh8v"
Nov 25 17:41:49 crc kubenswrapper[4800]: I1125 17:41:49.109431 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z2qfj"]
Nov 25 17:41:49 crc kubenswrapper[4800]: W1125 17:41:49.499669 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91a8fb54_0ae5_4d8d_9a67_bd003820f573.slice/crio-6edae538bd87a12c3bbfeb756f8dbf3a35cf53071a0e83877ece8f26df7b39ef WatchSource:0}: Error finding container 6edae538bd87a12c3bbfeb756f8dbf3a35cf53071a0e83877ece8f26df7b39ef: Status 404 returned error can't find the container with id 6edae538bd87a12c3bbfeb756f8dbf3a35cf53071a0e83877ece8f26df7b39ef
Nov 25 17:41:49 crc kubenswrapper[4800]: I1125 17:41:49.500454 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqh8v"]
Nov 25 17:41:49 crc kubenswrapper[4800]: I1125 17:41:49.560748 4800 generic.go:334] "Generic (PLEG): container finished" podID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerID="5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b" exitCode=0
Nov 25 17:41:49 crc kubenswrapper[4800]: I1125 17:41:49.560894 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z2qfj" event={"ID":"344e77b2-719f-4e7f-b6dc-f68e02dbf240","Type":"ContainerDied","Data":"5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b"}
Nov 25 17:41:49 crc kubenswrapper[4800]: I1125 17:41:49.560937 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z2qfj" event={"ID":"344e77b2-719f-4e7f-b6dc-f68e02dbf240","Type":"ContainerStarted","Data":"ee876394941169691fbbc62fad87ab260934ad62b0afac39796a226f91b83ed9"}
Nov 25 17:41:49 crc kubenswrapper[4800]: I1125 17:41:49.562820 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqh8v" event={"ID":"91a8fb54-0ae5-4d8d-9a67-bd003820f573","Type":"ContainerStarted","Data":"6edae538bd87a12c3bbfeb756f8dbf3a35cf53071a0e83877ece8f26df7b39ef"}
Nov 25 17:41:50 crc kubenswrapper[4800]: I1125 17:41:50.574427 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z2qfj" event={"ID":"344e77b2-719f-4e7f-b6dc-f68e02dbf240","Type":"ContainerStarted","Data":"b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98"}
Nov 25 17:41:50 crc kubenswrapper[4800]: I1125 17:41:50.576852 4800 generic.go:334] "Generic (PLEG): container finished" podID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerID="e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0" exitCode=0
Nov 25 17:41:50 crc kubenswrapper[4800]: I1125 17:41:50.576927 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqh8v" event={"ID":"91a8fb54-0ae5-4d8d-9a67-bd003820f573","Type":"ContainerDied","Data":"e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0"}
containerID="b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98" exitCode=0 Nov 25 17:41:51 crc kubenswrapper[4800]: I1125 17:41:51.591171 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z2qfj" event={"ID":"344e77b2-719f-4e7f-b6dc-f68e02dbf240","Type":"ContainerDied","Data":"b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98"} Nov 25 17:41:51 crc kubenswrapper[4800]: I1125 17:41:51.597197 4800 generic.go:334] "Generic (PLEG): container finished" podID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerID="559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8" exitCode=0 Nov 25 17:41:51 crc kubenswrapper[4800]: I1125 17:41:51.597252 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqh8v" event={"ID":"91a8fb54-0ae5-4d8d-9a67-bd003820f573","Type":"ContainerDied","Data":"559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8"} Nov 25 17:41:52 crc kubenswrapper[4800]: I1125 17:41:52.613196 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqh8v" event={"ID":"91a8fb54-0ae5-4d8d-9a67-bd003820f573","Type":"ContainerStarted","Data":"438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf"} Nov 25 17:41:52 crc kubenswrapper[4800]: I1125 17:41:52.616535 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z2qfj" event={"ID":"344e77b2-719f-4e7f-b6dc-f68e02dbf240","Type":"ContainerStarted","Data":"ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc"} Nov 25 17:41:52 crc kubenswrapper[4800]: I1125 17:41:52.638094 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fqh8v" podStartSLOduration=3.150522823 podStartE2EDuration="4.638062328s" podCreationTimestamp="2025-11-25 17:41:48 +0000 UTC" firstStartedPulling="2025-11-25 17:41:50.579140631 +0000 UTC m=+8671.633549113" lastFinishedPulling="2025-11-25 17:41:52.066680096 +0000 UTC m=+8673.121088618" observedRunningTime="2025-11-25 17:41:52.636457134 +0000 UTC m=+8673.690865656" watchObservedRunningTime="2025-11-25 17:41:52.638062328 +0000 UTC m=+8673.692470850" Nov 25 17:41:52 crc kubenswrapper[4800]: I1125 17:41:52.672678 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z2qfj" podStartSLOduration=2.217795731 podStartE2EDuration="4.672652349s" podCreationTimestamp="2025-11-25 17:41:48 +0000 UTC" firstStartedPulling="2025-11-25 17:41:49.564305985 +0000 UTC m=+8670.618714477" lastFinishedPulling="2025-11-25 17:41:52.019162573 +0000 UTC m=+8673.073571095" observedRunningTime="2025-11-25 17:41:52.663388437 +0000 UTC m=+8673.717796929" watchObservedRunningTime="2025-11-25 17:41:52.672652349 +0000 UTC m=+8673.727060871" Nov 25 17:41:58 crc kubenswrapper[4800]: I1125 17:41:58.594175 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z2qfj" Nov 25 17:41:58 crc kubenswrapper[4800]: I1125 17:41:58.594746 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z2qfj" Nov 25 17:41:58 crc kubenswrapper[4800]: I1125 17:41:58.685459 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z2qfj" Nov 25 17:41:58 crc kubenswrapper[4800]: I1125 17:41:58.762330 4800 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z2qfj" Nov 25 17:41:58 crc kubenswrapper[4800]: I1125 17:41:58.943082 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z2qfj"] Nov 25 17:41:59 crc kubenswrapper[4800]: I1125 17:41:59.051813 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fqh8v" Nov 25 17:41:59 crc kubenswrapper[4800]: I1125 17:41:59.051931 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fqh8v" Nov 25 17:41:59 crc kubenswrapper[4800]: I1125 17:41:59.144218 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fqh8v" Nov 25 17:41:59 crc kubenswrapper[4800]: I1125 17:41:59.807500 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fqh8v" Nov 25 17:42:00 crc kubenswrapper[4800]: I1125 17:42:00.706976 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z2qfj" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerName="registry-server" containerID="cri-o://ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc" gracePeriod=2 Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.229255 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z2qfj" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.319656 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58gzk\" (UniqueName: \"kubernetes.io/projected/344e77b2-719f-4e7f-b6dc-f68e02dbf240-kube-api-access-58gzk\") pod \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.319820 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-catalog-content\") pod \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.319960 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-utilities\") pod \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\" (UID: \"344e77b2-719f-4e7f-b6dc-f68e02dbf240\") " Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.321696 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-utilities" (OuterVolumeSpecName: "utilities") pod "344e77b2-719f-4e7f-b6dc-f68e02dbf240" (UID: "344e77b2-719f-4e7f-b6dc-f68e02dbf240"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.329936 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqh8v"] Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.331423 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/344e77b2-719f-4e7f-b6dc-f68e02dbf240-kube-api-access-58gzk" (OuterVolumeSpecName: "kube-api-access-58gzk") pod "344e77b2-719f-4e7f-b6dc-f68e02dbf240" (UID: "344e77b2-719f-4e7f-b6dc-f68e02dbf240"). InnerVolumeSpecName "kube-api-access-58gzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.422911 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.422983 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58gzk\" (UniqueName: \"kubernetes.io/projected/344e77b2-719f-4e7f-b6dc-f68e02dbf240-kube-api-access-58gzk\") on node \"crc\" DevicePath \"\"" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.554637 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "344e77b2-719f-4e7f-b6dc-f68e02dbf240" (UID: "344e77b2-719f-4e7f-b6dc-f68e02dbf240"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.626605 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/344e77b2-719f-4e7f-b6dc-f68e02dbf240-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.720768 4800 generic.go:334] "Generic (PLEG): container finished" podID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerID="ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc" exitCode=0 Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.720882 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z2qfj" event={"ID":"344e77b2-719f-4e7f-b6dc-f68e02dbf240","Type":"ContainerDied","Data":"ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc"} Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.720967 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z2qfj" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.721751 4800 scope.go:117] "RemoveContainer" containerID="ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.722132 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fqh8v" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="registry-server" containerID="cri-o://438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf" gracePeriod=2 Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.724951 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z2qfj" event={"ID":"344e77b2-719f-4e7f-b6dc-f68e02dbf240","Type":"ContainerDied","Data":"ee876394941169691fbbc62fad87ab260934ad62b0afac39796a226f91b83ed9"} Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.753797 4800 scope.go:117] "RemoveContainer" containerID="b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.782119 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z2qfj"] Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.801575 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z2qfj"] Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.840245 4800 scope.go:117] "RemoveContainer" containerID="5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.931496 4800 scope.go:117] "RemoveContainer" containerID="ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc" Nov 25 17:42:01 crc kubenswrapper[4800]: E1125 17:42:01.935280 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc\": container with ID starting with ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc not found: ID does not exist" containerID="ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.935340 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc"} err="failed to get container status \"ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc\": rpc error: code = NotFound desc = could not find container \"ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc\": container with ID starting with ec2f3ded953e4064c8988a4aab40835ca2cbf1abe0f3b11575a035538ee3b6bc not found: ID does not exist" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.935372 4800 scope.go:117] "RemoveContainer" containerID="b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98" Nov 25 17:42:01 crc kubenswrapper[4800]: E1125 17:42:01.937146 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98\": container with ID starting with b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98 not found: ID does not exist" containerID="b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98" Nov 25 17:42:01 crc 
kubenswrapper[4800]: I1125 17:42:01.937182 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98"} err="failed to get container status \"b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98\": rpc error: code = NotFound desc = could not find container \"b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98\": container with ID starting with b4f2de7de5522d5e18ce1b1d074028a49df71a1c512367e6af4a72bc00547b98 not found: ID does not exist" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.937203 4800 scope.go:117] "RemoveContainer" containerID="5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b" Nov 25 17:42:01 crc kubenswrapper[4800]: E1125 17:42:01.938502 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b\": container with ID starting with 5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b not found: ID does not exist" containerID="5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b" Nov 25 17:42:01 crc kubenswrapper[4800]: I1125 17:42:01.938537 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b"} err="failed to get container status \"5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b\": rpc error: code = NotFound desc = could not find container \"5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b\": container with ID starting with 5e36a7077b94a5e42e94908828f9357e94b832ad903850c6c72cc09f69f0cf0b not found: ID does not exist" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.284737 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqh8v" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.445071 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-utilities\") pod \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.445281 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjlps\" (UniqueName: \"kubernetes.io/projected/91a8fb54-0ae5-4d8d-9a67-bd003820f573-kube-api-access-jjlps\") pod \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.445346 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-catalog-content\") pod \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\" (UID: \"91a8fb54-0ae5-4d8d-9a67-bd003820f573\") " Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.446061 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-utilities" (OuterVolumeSpecName: "utilities") pod "91a8fb54-0ae5-4d8d-9a67-bd003820f573" (UID: "91a8fb54-0ae5-4d8d-9a67-bd003820f573"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.450821 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91a8fb54-0ae5-4d8d-9a67-bd003820f573-kube-api-access-jjlps" (OuterVolumeSpecName: "kube-api-access-jjlps") pod "91a8fb54-0ae5-4d8d-9a67-bd003820f573" (UID: "91a8fb54-0ae5-4d8d-9a67-bd003820f573"). InnerVolumeSpecName "kube-api-access-jjlps". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.464719 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91a8fb54-0ae5-4d8d-9a67-bd003820f573" (UID: "91a8fb54-0ae5-4d8d-9a67-bd003820f573"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.548061 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjlps\" (UniqueName: \"kubernetes.io/projected/91a8fb54-0ae5-4d8d-9a67-bd003820f573-kube-api-access-jjlps\") on node \"crc\" DevicePath \"\"" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.548106 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.548119 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91a8fb54-0ae5-4d8d-9a67-bd003820f573-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.733583 4800 generic.go:334] "Generic (PLEG): container finished" podID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerID="438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf" exitCode=0 Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.733697 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqh8v" event={"ID":"91a8fb54-0ae5-4d8d-9a67-bd003820f573","Type":"ContainerDied","Data":"438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf"} Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.733737 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqh8v" event={"ID":"91a8fb54-0ae5-4d8d-9a67-bd003820f573","Type":"ContainerDied","Data":"6edae538bd87a12c3bbfeb756f8dbf3a35cf53071a0e83877ece8f26df7b39ef"} Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.733738 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqh8v" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.733765 4800 scope.go:117] "RemoveContainer" containerID="438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.759198 4800 scope.go:117] "RemoveContainer" containerID="559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.788166 4800 scope.go:117] "RemoveContainer" containerID="e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.789248 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqh8v"] Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.800838 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqh8v"] Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.813598 4800 scope.go:117] "RemoveContainer" containerID="438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf" Nov 25 17:42:02 crc kubenswrapper[4800]: E1125 17:42:02.814115 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf\": container with ID starting with 438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf not found: ID does not exist" containerID="438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.814173 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf"} err="failed to get container status \"438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf\": rpc error: code = NotFound desc = could not find container \"438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf\": container with ID starting with 438ab162210e39df97e1d27081892850c21ef46d175aca409916654879364acf not found: ID does not exist" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.814241 4800 scope.go:117] "RemoveContainer" containerID="559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8" Nov 25 17:42:02 crc kubenswrapper[4800]: E1125 17:42:02.814761 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8\": container with ID starting with 559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8 not found: ID does not exist" containerID="559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.814816 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8"} err="failed to get container status \"559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8\": rpc error: code = NotFound desc = could not find container \"559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8\": container with ID starting with 559b3711fdb1830976ff6048d8db40b89a19929d87b8009a4e668d9b0fe09de8 not found: ID does not exist" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.814893 4800 scope.go:117] "RemoveContainer" 
containerID="e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0" Nov 25 17:42:02 crc kubenswrapper[4800]: E1125 17:42:02.815234 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0\": container with ID starting with e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0 not found: ID does not exist" containerID="e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0" Nov 25 17:42:02 crc kubenswrapper[4800]: I1125 17:42:02.815270 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0"} err="failed to get container status \"e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0\": rpc error: code = NotFound desc = could not find container \"e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0\": container with ID starting with e99fe6ae94e0eeac5e42b7cf722f90eb861174e97e5de6a9b183d73319d698c0 not found: ID does not exist" Nov 25 17:42:03 crc kubenswrapper[4800]: I1125 17:42:03.798576 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" path="/var/lib/kubelet/pods/344e77b2-719f-4e7f-b6dc-f68e02dbf240/volumes" Nov 25 17:42:03 crc kubenswrapper[4800]: I1125 17:42:03.799444 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" path="/var/lib/kubelet/pods/91a8fb54-0ae5-4d8d-9a67-bd003820f573/volumes" Nov 25 17:43:12 crc kubenswrapper[4800]: I1125 17:43:12.640301 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:43:12 crc kubenswrapper[4800]: I1125 17:43:12.640992 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:43:42 crc kubenswrapper[4800]: I1125 17:43:42.639960 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:43:42 crc kubenswrapper[4800]: I1125 17:43:42.640576 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:44:12 crc kubenswrapper[4800]: I1125 17:44:12.640327 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:44:12 crc kubenswrapper[4800]: I1125 17:44:12.641216 4800 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:44:12 crc kubenswrapper[4800]: I1125 17:44:12.641287 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 17:44:12 crc kubenswrapper[4800]: I1125 17:44:12.642265 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:44:12 crc kubenswrapper[4800]: I1125 17:44:12.642365 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" gracePeriod=600 Nov 25 17:44:12 crc kubenswrapper[4800]: E1125 17:44:12.784313 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:44:13 crc kubenswrapper[4800]: I1125 17:44:13.279198 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" exitCode=0 Nov 25 17:44:13 crc kubenswrapper[4800]: I1125 17:44:13.279259 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09"} Nov 25 17:44:13 crc kubenswrapper[4800]: I1125 17:44:13.279308 4800 scope.go:117] "RemoveContainer" containerID="078b57d8a5f5024f1311b46f754707d63dba82cd527688596873eddf4031ce8d" Nov 25 17:44:13 crc kubenswrapper[4800]: I1125 17:44:13.280446 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:44:13 crc kubenswrapper[4800]: E1125 17:44:13.281168 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:44:28 crc kubenswrapper[4800]: I1125 17:44:28.786759 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:44:28 crc kubenswrapper[4800]: E1125 17:44:28.788039 4800 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:44:43 crc kubenswrapper[4800]: I1125 17:44:43.786280 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:44:43 crc kubenswrapper[4800]: E1125 17:44:43.787519 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:44:56 crc kubenswrapper[4800]: I1125 17:44:56.787646 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:44:56 crc kubenswrapper[4800]: E1125 17:44:56.788368 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.166335 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4"] Nov 25 17:45:00 crc kubenswrapper[4800]: E1125 17:45:00.167435 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="extract-utilities" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167454 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="extract-utilities" Nov 25 17:45:00 crc kubenswrapper[4800]: E1125 17:45:00.167509 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167519 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4800]: E1125 17:45:00.167531 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerName="extract-utilities" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167538 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerName="extract-utilities" Nov 25 17:45:00 crc kubenswrapper[4800]: E1125 17:45:00.167549 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167557 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" 
containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4800]: E1125 17:45:00.167571 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="extract-content" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167578 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="extract-content" Nov 25 17:45:00 crc kubenswrapper[4800]: E1125 17:45:00.167599 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerName="extract-content" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167606 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerName="extract-content" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167877 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="91a8fb54-0ae5-4d8d-9a67-bd003820f573" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.167904 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="344e77b2-719f-4e7f-b6dc-f68e02dbf240" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.168765 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.172077 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.172230 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.181548 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4"] Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.225209 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ftkj\" (UniqueName: \"kubernetes.io/projected/5518a166-486a-4044-9e36-352f4f34af8b-kube-api-access-9ftkj\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.225315 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5518a166-486a-4044-9e36-352f4f34af8b-secret-volume\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.225351 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5518a166-486a-4044-9e36-352f4f34af8b-config-volume\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.326754 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ftkj\" (UniqueName: 
\"kubernetes.io/projected/5518a166-486a-4044-9e36-352f4f34af8b-kube-api-access-9ftkj\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.327179 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5518a166-486a-4044-9e36-352f4f34af8b-secret-volume\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.327216 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5518a166-486a-4044-9e36-352f4f34af8b-config-volume\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.328244 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5518a166-486a-4044-9e36-352f4f34af8b-config-volume\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.338604 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5518a166-486a-4044-9e36-352f4f34af8b-secret-volume\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.345265 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ftkj\" (UniqueName: \"kubernetes.io/projected/5518a166-486a-4044-9e36-352f4f34af8b-kube-api-access-9ftkj\") pod \"collect-profiles-29401545-cc5k4\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.492044 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:00 crc kubenswrapper[4800]: I1125 17:45:00.968629 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4"] Nov 25 17:45:00 crc kubenswrapper[4800]: W1125 17:45:00.971163 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5518a166_486a_4044_9e36_352f4f34af8b.slice/crio-f33c9ef4abffc394615d1338ac09e6de39ce0f77250c89067cf412bf3c2e9d16 WatchSource:0}: Error finding container f33c9ef4abffc394615d1338ac09e6de39ce0f77250c89067cf412bf3c2e9d16: Status 404 returned error can't find the container with id f33c9ef4abffc394615d1338ac09e6de39ce0f77250c89067cf412bf3c2e9d16 Nov 25 17:45:01 crc kubenswrapper[4800]: I1125 17:45:01.820619 4800 generic.go:334] "Generic (PLEG): container finished" podID="5518a166-486a-4044-9e36-352f4f34af8b" containerID="25d86a6b914b4589c176ff32887baea3b98163bf2a3f736b9f9e5a01f5ec028b" exitCode=0 Nov 25 17:45:01 crc kubenswrapper[4800]: I1125 17:45:01.820685 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" event={"ID":"5518a166-486a-4044-9e36-352f4f34af8b","Type":"ContainerDied","Data":"25d86a6b914b4589c176ff32887baea3b98163bf2a3f736b9f9e5a01f5ec028b"} Nov 25 17:45:01 crc kubenswrapper[4800]: I1125 17:45:01.821194 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" event={"ID":"5518a166-486a-4044-9e36-352f4f34af8b","Type":"ContainerStarted","Data":"f33c9ef4abffc394615d1338ac09e6de39ce0f77250c89067cf412bf3c2e9d16"} Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.208003 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.288260 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ftkj\" (UniqueName: \"kubernetes.io/projected/5518a166-486a-4044-9e36-352f4f34af8b-kube-api-access-9ftkj\") pod \"5518a166-486a-4044-9e36-352f4f34af8b\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.288541 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5518a166-486a-4044-9e36-352f4f34af8b-config-volume\") pod \"5518a166-486a-4044-9e36-352f4f34af8b\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.288716 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5518a166-486a-4044-9e36-352f4f34af8b-secret-volume\") pod \"5518a166-486a-4044-9e36-352f4f34af8b\" (UID: \"5518a166-486a-4044-9e36-352f4f34af8b\") " Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.288945 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5518a166-486a-4044-9e36-352f4f34af8b-config-volume" (OuterVolumeSpecName: "config-volume") pod "5518a166-486a-4044-9e36-352f4f34af8b" (UID: "5518a166-486a-4044-9e36-352f4f34af8b"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.289920 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5518a166-486a-4044-9e36-352f4f34af8b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.293455 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5518a166-486a-4044-9e36-352f4f34af8b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5518a166-486a-4044-9e36-352f4f34af8b" (UID: "5518a166-486a-4044-9e36-352f4f34af8b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.295068 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5518a166-486a-4044-9e36-352f4f34af8b-kube-api-access-9ftkj" (OuterVolumeSpecName: "kube-api-access-9ftkj") pod "5518a166-486a-4044-9e36-352f4f34af8b" (UID: "5518a166-486a-4044-9e36-352f4f34af8b"). InnerVolumeSpecName "kube-api-access-9ftkj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.392315 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ftkj\" (UniqueName: \"kubernetes.io/projected/5518a166-486a-4044-9e36-352f4f34af8b-kube-api-access-9ftkj\") on node \"crc\" DevicePath \"\"" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.392355 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5518a166-486a-4044-9e36-352f4f34af8b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.843075 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" event={"ID":"5518a166-486a-4044-9e36-352f4f34af8b","Type":"ContainerDied","Data":"f33c9ef4abffc394615d1338ac09e6de39ce0f77250c89067cf412bf3c2e9d16"} Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.843398 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f33c9ef4abffc394615d1338ac09e6de39ce0f77250c89067cf412bf3c2e9d16" Nov 25 17:45:03 crc kubenswrapper[4800]: I1125 17:45:03.843135 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-cc5k4" Nov 25 17:45:04 crc kubenswrapper[4800]: I1125 17:45:04.316153 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp"] Nov 25 17:45:04 crc kubenswrapper[4800]: I1125 17:45:04.331154 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-q9vgp"] Nov 25 17:45:05 crc kubenswrapper[4800]: I1125 17:45:05.795213 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="085da92b-709c-4295-bf75-3f70925c16a1" path="/var/lib/kubelet/pods/085da92b-709c-4295-bf75-3f70925c16a1/volumes" Nov 25 17:45:09 crc kubenswrapper[4800]: I1125 17:45:09.800228 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:45:09 crc kubenswrapper[4800]: E1125 17:45:09.801164 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:45:20 crc kubenswrapper[4800]: I1125 17:45:20.785857 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:45:20 crc kubenswrapper[4800]: E1125 17:45:20.786413 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:45:33 crc kubenswrapper[4800]: I1125 17:45:33.786923 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:45:33 crc kubenswrapper[4800]: E1125 17:45:33.788495 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:45:34 crc kubenswrapper[4800]: I1125 17:45:34.222163 4800 scope.go:117] "RemoveContainer" containerID="6464fe4d48a54baf61038543dcb6d719b7770a027e0d27fb7656b1447a103ce6" Nov 25 17:45:46 crc kubenswrapper[4800]: I1125 17:45:46.786132 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:45:46 crc kubenswrapper[4800]: E1125 17:45:46.787089 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:46:01 crc kubenswrapper[4800]: I1125 17:46:01.786538 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:46:01 crc kubenswrapper[4800]: E1125 17:46:01.787698 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:46:12 crc kubenswrapper[4800]: I1125 17:46:12.785502 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:46:12 crc kubenswrapper[4800]: E1125 17:46:12.786243 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:46:23 crc kubenswrapper[4800]: I1125 17:46:23.786357 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:46:23 crc kubenswrapper[4800]: E1125 17:46:23.789026 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:46:36 crc kubenswrapper[4800]: I1125 17:46:36.785496 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:46:36 crc kubenswrapper[4800]: E1125 17:46:36.786402 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:46:51 crc kubenswrapper[4800]: I1125 17:46:51.786422 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:46:51 crc kubenswrapper[4800]: E1125 17:46:51.787092 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:47:02 crc kubenswrapper[4800]: I1125 17:47:02.786256 4800 
scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:47:02 crc kubenswrapper[4800]: E1125 17:47:02.787623 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:47:17 crc kubenswrapper[4800]: I1125 17:47:17.793609 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:47:17 crc kubenswrapper[4800]: E1125 17:47:17.795175 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:47:31 crc kubenswrapper[4800]: I1125 17:47:31.785503 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:47:31 crc kubenswrapper[4800]: E1125 17:47:31.786305 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:47:43 crc kubenswrapper[4800]: I1125 17:47:43.786927 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:47:43 crc kubenswrapper[4800]: E1125 17:47:43.789384 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:47:58 crc kubenswrapper[4800]: I1125 17:47:58.786407 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:47:58 crc kubenswrapper[4800]: E1125 17:47:58.788029 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:48:10 crc kubenswrapper[4800]: I1125 17:48:10.786016 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:48:10 crc kubenswrapper[4800]: E1125 17:48:10.786943 4800 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:48:22 crc kubenswrapper[4800]: I1125 17:48:22.786722 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:48:22 crc kubenswrapper[4800]: E1125 17:48:22.787803 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:48:37 crc kubenswrapper[4800]: I1125 17:48:37.785765 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:48:37 crc kubenswrapper[4800]: E1125 17:48:37.788540 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:48:48 crc kubenswrapper[4800]: I1125 17:48:48.784824 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:48:48 crc kubenswrapper[4800]: E1125 17:48:48.785422 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:49:00 crc kubenswrapper[4800]: I1125 17:49:00.785561 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:49:00 crc kubenswrapper[4800]: E1125 17:49:00.786461 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:49:12 crc kubenswrapper[4800]: I1125 17:49:12.785263 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:49:13 crc kubenswrapper[4800]: I1125 17:49:13.799643 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"36804d756446b6fef336440010964b334919600cf19b967f42dfb309ecd8ca93"} Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.422017 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6gj76"] Nov 25 17:50:13 crc kubenswrapper[4800]: E1125 17:50:13.423679 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5518a166-486a-4044-9e36-352f4f34af8b" containerName="collect-profiles" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.423711 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5518a166-486a-4044-9e36-352f4f34af8b" containerName="collect-profiles" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.424237 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5518a166-486a-4044-9e36-352f4f34af8b" containerName="collect-profiles" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.430478 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.439631 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gj76"] Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.485105 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkz46\" (UniqueName: \"kubernetes.io/projected/e3a0c059-60af-49d9-b072-bac854d85566-kube-api-access-rkz46\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.485243 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-catalog-content\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.485284 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-utilities\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.586711 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkz46\" (UniqueName: \"kubernetes.io/projected/e3a0c059-60af-49d9-b072-bac854d85566-kube-api-access-rkz46\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.586831 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-catalog-content\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.586889 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-utilities\") 
pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.587777 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-catalog-content\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.587810 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-utilities\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.616783 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkz46\" (UniqueName: \"kubernetes.io/projected/e3a0c059-60af-49d9-b072-bac854d85566-kube-api-access-rkz46\") pod \"redhat-operators-6gj76\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:13 crc kubenswrapper[4800]: I1125 17:50:13.768950 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:14 crc kubenswrapper[4800]: I1125 17:50:14.252129 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gj76"] Nov 25 17:50:14 crc kubenswrapper[4800]: W1125 17:50:14.253439 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3a0c059_60af_49d9_b072_bac854d85566.slice/crio-6f200c5bb09b0d96112512a864ee8063766f3a5d58d331d6ce7671f93897a8a2 WatchSource:0}: Error finding container 6f200c5bb09b0d96112512a864ee8063766f3a5d58d331d6ce7671f93897a8a2: Status 404 returned error can't find the container with id 6f200c5bb09b0d96112512a864ee8063766f3a5d58d331d6ce7671f93897a8a2 Nov 25 17:50:14 crc kubenswrapper[4800]: I1125 17:50:14.455442 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gj76" event={"ID":"e3a0c059-60af-49d9-b072-bac854d85566","Type":"ContainerStarted","Data":"9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6"} Nov 25 17:50:14 crc kubenswrapper[4800]: I1125 17:50:14.455738 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gj76" event={"ID":"e3a0c059-60af-49d9-b072-bac854d85566","Type":"ContainerStarted","Data":"6f200c5bb09b0d96112512a864ee8063766f3a5d58d331d6ce7671f93897a8a2"} Nov 25 17:50:15 crc kubenswrapper[4800]: I1125 17:50:15.468798 4800 generic.go:334] "Generic (PLEG): container finished" podID="e3a0c059-60af-49d9-b072-bac854d85566" containerID="9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6" exitCode=0 Nov 25 17:50:15 crc kubenswrapper[4800]: I1125 17:50:15.469279 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gj76" event={"ID":"e3a0c059-60af-49d9-b072-bac854d85566","Type":"ContainerDied","Data":"9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6"} Nov 25 17:50:15 crc kubenswrapper[4800]: I1125 17:50:15.473140 4800 provider.go:102] Refreshing cache for provider: 
*credentialprovider.defaultDockerConfigProvider Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.572779 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q5ssd"] Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.575230 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.592497 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q5ssd"] Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.763808 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-catalog-content\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.763893 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-utilities\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.763961 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7c6x\" (UniqueName: \"kubernetes.io/projected/474f1601-c059-4d45-a559-71caf6d3d05b-kube-api-access-c7c6x\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.865179 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-catalog-content\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.865232 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-utilities\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.865267 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7c6x\" (UniqueName: \"kubernetes.io/projected/474f1601-c059-4d45-a559-71caf6d3d05b-kube-api-access-c7c6x\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.866634 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-catalog-content\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.867032 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-utilities\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.883229 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7c6x\" (UniqueName: \"kubernetes.io/projected/474f1601-c059-4d45-a559-71caf6d3d05b-kube-api-access-c7c6x\") pod \"community-operators-q5ssd\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:16 crc kubenswrapper[4800]: I1125 17:50:16.948029 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:17 crc kubenswrapper[4800]: I1125 17:50:17.480519 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q5ssd"] Nov 25 17:50:17 crc kubenswrapper[4800]: W1125 17:50:17.480660 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod474f1601_c059_4d45_a559_71caf6d3d05b.slice/crio-1594aa2ca086d567909a2aace6d6e0823603e3bb845fba52b7e0672699e6b3ff WatchSource:0}: Error finding container 1594aa2ca086d567909a2aace6d6e0823603e3bb845fba52b7e0672699e6b3ff: Status 404 returned error can't find the container with id 1594aa2ca086d567909a2aace6d6e0823603e3bb845fba52b7e0672699e6b3ff Nov 25 17:50:17 crc kubenswrapper[4800]: I1125 17:50:17.491199 4800 generic.go:334] "Generic (PLEG): container finished" podID="e3a0c059-60af-49d9-b072-bac854d85566" containerID="befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923" exitCode=0 Nov 25 17:50:17 crc kubenswrapper[4800]: I1125 17:50:17.491240 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gj76" event={"ID":"e3a0c059-60af-49d9-b072-bac854d85566","Type":"ContainerDied","Data":"befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923"} Nov 25 17:50:18 crc kubenswrapper[4800]: I1125 17:50:18.503283 4800 generic.go:334] "Generic (PLEG): container finished" podID="474f1601-c059-4d45-a559-71caf6d3d05b" containerID="eeba9a69354e50b1a0999cc8490a918054e5523c2a1841bea198658d12abae1f" exitCode=0 Nov 25 17:50:18 crc kubenswrapper[4800]: I1125 17:50:18.503373 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5ssd" event={"ID":"474f1601-c059-4d45-a559-71caf6d3d05b","Type":"ContainerDied","Data":"eeba9a69354e50b1a0999cc8490a918054e5523c2a1841bea198658d12abae1f"} Nov 25 17:50:18 crc kubenswrapper[4800]: I1125 17:50:18.506642 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5ssd" event={"ID":"474f1601-c059-4d45-a559-71caf6d3d05b","Type":"ContainerStarted","Data":"1594aa2ca086d567909a2aace6d6e0823603e3bb845fba52b7e0672699e6b3ff"} Nov 25 17:50:18 crc kubenswrapper[4800]: I1125 17:50:18.512683 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gj76" event={"ID":"e3a0c059-60af-49d9-b072-bac854d85566","Type":"ContainerStarted","Data":"5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213"} Nov 25 17:50:18 crc kubenswrapper[4800]: I1125 17:50:18.564303 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6gj76" 
podStartSLOduration=3.139216573 podStartE2EDuration="5.564278352s" podCreationTimestamp="2025-11-25 17:50:13 +0000 UTC" firstStartedPulling="2025-11-25 17:50:15.472756193 +0000 UTC m=+9176.527164685" lastFinishedPulling="2025-11-25 17:50:17.897817972 +0000 UTC m=+9178.952226464" observedRunningTime="2025-11-25 17:50:18.558356942 +0000 UTC m=+9179.612765424" watchObservedRunningTime="2025-11-25 17:50:18.564278352 +0000 UTC m=+9179.618686834" Nov 25 17:50:19 crc kubenswrapper[4800]: I1125 17:50:19.524037 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5ssd" event={"ID":"474f1601-c059-4d45-a559-71caf6d3d05b","Type":"ContainerStarted","Data":"2fd4c0cab226c9e57d3b7c4d788fd719028dda300343cb2823923c2ac04c970b"} Nov 25 17:50:20 crc kubenswrapper[4800]: I1125 17:50:20.548060 4800 generic.go:334] "Generic (PLEG): container finished" podID="474f1601-c059-4d45-a559-71caf6d3d05b" containerID="2fd4c0cab226c9e57d3b7c4d788fd719028dda300343cb2823923c2ac04c970b" exitCode=0 Nov 25 17:50:20 crc kubenswrapper[4800]: I1125 17:50:20.548115 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5ssd" event={"ID":"474f1601-c059-4d45-a559-71caf6d3d05b","Type":"ContainerDied","Data":"2fd4c0cab226c9e57d3b7c4d788fd719028dda300343cb2823923c2ac04c970b"} Nov 25 17:50:21 crc kubenswrapper[4800]: I1125 17:50:21.563665 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5ssd" event={"ID":"474f1601-c059-4d45-a559-71caf6d3d05b","Type":"ContainerStarted","Data":"007fa710671d53d80a74cc58f611050a71ee974248ebcaf7c62026d3a8ab11b3"} Nov 25 17:50:21 crc kubenswrapper[4800]: I1125 17:50:21.583282 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q5ssd" podStartSLOduration=3.051917208 podStartE2EDuration="5.583258647s" podCreationTimestamp="2025-11-25 17:50:16 +0000 UTC" firstStartedPulling="2025-11-25 17:50:18.505781574 +0000 UTC m=+9179.560190096" lastFinishedPulling="2025-11-25 17:50:21.037123013 +0000 UTC m=+9182.091531535" observedRunningTime="2025-11-25 17:50:21.577488681 +0000 UTC m=+9182.631897183" watchObservedRunningTime="2025-11-25 17:50:21.583258647 +0000 UTC m=+9182.637667139" Nov 25 17:50:23 crc kubenswrapper[4800]: I1125 17:50:23.784243 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:23 crc kubenswrapper[4800]: I1125 17:50:23.785422 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:23 crc kubenswrapper[4800]: I1125 17:50:23.863584 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:24 crc kubenswrapper[4800]: I1125 17:50:24.670900 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:24 crc kubenswrapper[4800]: I1125 17:50:24.968696 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gj76"] Nov 25 17:50:26 crc kubenswrapper[4800]: I1125 17:50:26.613047 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6gj76" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="registry-server" 
containerID="cri-o://5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213" gracePeriod=2 Nov 25 17:50:26 crc kubenswrapper[4800]: I1125 17:50:26.948855 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:26 crc kubenswrapper[4800]: I1125 17:50:26.948913 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.020092 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.171559 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.293299 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-catalog-content\") pod \"e3a0c059-60af-49d9-b072-bac854d85566\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.293432 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkz46\" (UniqueName: \"kubernetes.io/projected/e3a0c059-60af-49d9-b072-bac854d85566-kube-api-access-rkz46\") pod \"e3a0c059-60af-49d9-b072-bac854d85566\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.293479 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-utilities\") pod \"e3a0c059-60af-49d9-b072-bac854d85566\" (UID: \"e3a0c059-60af-49d9-b072-bac854d85566\") " Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.294380 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-utilities" (OuterVolumeSpecName: "utilities") pod "e3a0c059-60af-49d9-b072-bac854d85566" (UID: "e3a0c059-60af-49d9-b072-bac854d85566"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.294977 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.298706 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3a0c059-60af-49d9-b072-bac854d85566-kube-api-access-rkz46" (OuterVolumeSpecName: "kube-api-access-rkz46") pod "e3a0c059-60af-49d9-b072-bac854d85566" (UID: "e3a0c059-60af-49d9-b072-bac854d85566"). InnerVolumeSpecName "kube-api-access-rkz46". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.396773 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkz46\" (UniqueName: \"kubernetes.io/projected/e3a0c059-60af-49d9-b072-bac854d85566-kube-api-access-rkz46\") on node \"crc\" DevicePath \"\"" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.622553 4800 generic.go:334] "Generic (PLEG): container finished" podID="e3a0c059-60af-49d9-b072-bac854d85566" containerID="5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213" exitCode=0 Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.622618 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gj76" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.622662 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gj76" event={"ID":"e3a0c059-60af-49d9-b072-bac854d85566","Type":"ContainerDied","Data":"5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213"} Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.622689 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gj76" event={"ID":"e3a0c059-60af-49d9-b072-bac854d85566","Type":"ContainerDied","Data":"6f200c5bb09b0d96112512a864ee8063766f3a5d58d331d6ce7671f93897a8a2"} Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.622706 4800 scope.go:117] "RemoveContainer" containerID="5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.658817 4800 scope.go:117] "RemoveContainer" containerID="befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.666119 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.685213 4800 scope.go:117] "RemoveContainer" containerID="9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.726038 4800 scope.go:117] "RemoveContainer" containerID="5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213" Nov 25 17:50:27 crc kubenswrapper[4800]: E1125 17:50:27.729249 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213\": container with ID starting with 5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213 not found: ID does not exist" containerID="5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.729320 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213"} err="failed to get container status \"5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213\": rpc error: code = NotFound desc = could not find container \"5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213\": container with ID starting with 5f9d695cad2ff47c2a9398e9bf3ed8ec400e9cba5dcc33e643acc5a3d8e3a213 not found: ID does not exist" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.729345 4800 scope.go:117] "RemoveContainer" 
containerID="befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923" Nov 25 17:50:27 crc kubenswrapper[4800]: E1125 17:50:27.729813 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923\": container with ID starting with befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923 not found: ID does not exist" containerID="befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.729835 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923"} err="failed to get container status \"befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923\": rpc error: code = NotFound desc = could not find container \"befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923\": container with ID starting with befdd9ad74d5ca0dcf7a376ccfc296057209e2404b9dba6e0fc3381717e0b923 not found: ID does not exist" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.729868 4800 scope.go:117] "RemoveContainer" containerID="9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6" Nov 25 17:50:27 crc kubenswrapper[4800]: E1125 17:50:27.730222 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6\": container with ID starting with 9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6 not found: ID does not exist" containerID="9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6" Nov 25 17:50:27 crc kubenswrapper[4800]: I1125 17:50:27.730263 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6"} err="failed to get container status \"9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6\": rpc error: code = NotFound desc = could not find container \"9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6\": container with ID starting with 9d8b0411ac72822b22445834cdcd85f54da00bccf6705a0a8a5c204534b49ed6 not found: ID does not exist" Nov 25 17:50:28 crc kubenswrapper[4800]: I1125 17:50:28.107308 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3a0c059-60af-49d9-b072-bac854d85566" (UID: "e3a0c059-60af-49d9-b072-bac854d85566"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:50:28 crc kubenswrapper[4800]: I1125 17:50:28.108579 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a0c059-60af-49d9-b072-bac854d85566-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:50:28 crc kubenswrapper[4800]: I1125 17:50:28.272204 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gj76"] Nov 25 17:50:28 crc kubenswrapper[4800]: I1125 17:50:28.289067 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6gj76"] Nov 25 17:50:29 crc kubenswrapper[4800]: I1125 17:50:29.804930 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3a0c059-60af-49d9-b072-bac854d85566" path="/var/lib/kubelet/pods/e3a0c059-60af-49d9-b072-bac854d85566/volumes" Nov 25 17:50:29 crc kubenswrapper[4800]: I1125 17:50:29.983400 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q5ssd"] Nov 25 17:50:29 crc kubenswrapper[4800]: I1125 17:50:29.983760 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q5ssd" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="registry-server" containerID="cri-o://007fa710671d53d80a74cc58f611050a71ee974248ebcaf7c62026d3a8ab11b3" gracePeriod=2 Nov 25 17:50:30 crc kubenswrapper[4800]: I1125 17:50:30.669806 4800 generic.go:334] "Generic (PLEG): container finished" podID="474f1601-c059-4d45-a559-71caf6d3d05b" containerID="007fa710671d53d80a74cc58f611050a71ee974248ebcaf7c62026d3a8ab11b3" exitCode=0 Nov 25 17:50:30 crc kubenswrapper[4800]: I1125 17:50:30.669993 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5ssd" event={"ID":"474f1601-c059-4d45-a559-71caf6d3d05b","Type":"ContainerDied","Data":"007fa710671d53d80a74cc58f611050a71ee974248ebcaf7c62026d3a8ab11b3"} Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.027566 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.176472 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-catalog-content\") pod \"474f1601-c059-4d45-a559-71caf6d3d05b\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.176730 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7c6x\" (UniqueName: \"kubernetes.io/projected/474f1601-c059-4d45-a559-71caf6d3d05b-kube-api-access-c7c6x\") pod \"474f1601-c059-4d45-a559-71caf6d3d05b\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.176870 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-utilities\") pod \"474f1601-c059-4d45-a559-71caf6d3d05b\" (UID: \"474f1601-c059-4d45-a559-71caf6d3d05b\") " Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.178146 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-utilities" (OuterVolumeSpecName: "utilities") pod "474f1601-c059-4d45-a559-71caf6d3d05b" (UID: "474f1601-c059-4d45-a559-71caf6d3d05b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.185293 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/474f1601-c059-4d45-a559-71caf6d3d05b-kube-api-access-c7c6x" (OuterVolumeSpecName: "kube-api-access-c7c6x") pod "474f1601-c059-4d45-a559-71caf6d3d05b" (UID: "474f1601-c059-4d45-a559-71caf6d3d05b"). InnerVolumeSpecName "kube-api-access-c7c6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.233157 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "474f1601-c059-4d45-a559-71caf6d3d05b" (UID: "474f1601-c059-4d45-a559-71caf6d3d05b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.278873 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7c6x\" (UniqueName: \"kubernetes.io/projected/474f1601-c059-4d45-a559-71caf6d3d05b-kube-api-access-c7c6x\") on node \"crc\" DevicePath \"\"" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.278906 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.278918 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/474f1601-c059-4d45-a559-71caf6d3d05b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.680757 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5ssd" event={"ID":"474f1601-c059-4d45-a559-71caf6d3d05b","Type":"ContainerDied","Data":"1594aa2ca086d567909a2aace6d6e0823603e3bb845fba52b7e0672699e6b3ff"} Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.680812 4800 scope.go:117] "RemoveContainer" containerID="007fa710671d53d80a74cc58f611050a71ee974248ebcaf7c62026d3a8ab11b3" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.680863 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5ssd" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.709559 4800 scope.go:117] "RemoveContainer" containerID="2fd4c0cab226c9e57d3b7c4d788fd719028dda300343cb2823923c2ac04c970b" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.732610 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q5ssd"] Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.742797 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q5ssd"] Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.748005 4800 scope.go:117] "RemoveContainer" containerID="eeba9a69354e50b1a0999cc8490a918054e5523c2a1841bea198658d12abae1f" Nov 25 17:50:31 crc kubenswrapper[4800]: I1125 17:50:31.800610 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" path="/var/lib/kubelet/pods/474f1601-c059-4d45-a559-71caf6d3d05b/volumes" Nov 25 17:51:42 crc kubenswrapper[4800]: I1125 17:51:42.640902 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:51:42 crc kubenswrapper[4800]: I1125 17:51:42.641618 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:52:12 crc kubenswrapper[4800]: I1125 17:52:12.640477 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:52:12 crc kubenswrapper[4800]: I1125 17:52:12.641282 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:52:42 crc kubenswrapper[4800]: I1125 17:52:42.640335 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:52:42 crc kubenswrapper[4800]: I1125 17:52:42.640997 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:52:42 crc kubenswrapper[4800]: I1125 17:52:42.641063 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 17:52:42 crc kubenswrapper[4800]: I1125 17:52:42.642235 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"36804d756446b6fef336440010964b334919600cf19b967f42dfb309ecd8ca93"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:52:42 crc kubenswrapper[4800]: I1125 17:52:42.642377 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://36804d756446b6fef336440010964b334919600cf19b967f42dfb309ecd8ca93" gracePeriod=600 Nov 25 17:52:43 crc kubenswrapper[4800]: I1125 17:52:43.195067 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="36804d756446b6fef336440010964b334919600cf19b967f42dfb309ecd8ca93" exitCode=0 Nov 25 17:52:43 crc kubenswrapper[4800]: I1125 17:52:43.195422 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"36804d756446b6fef336440010964b334919600cf19b967f42dfb309ecd8ca93"} Nov 25 17:52:43 crc kubenswrapper[4800]: I1125 17:52:43.195717 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308"} Nov 25 17:52:43 crc kubenswrapper[4800]: I1125 17:52:43.195740 4800 scope.go:117] "RemoveContainer" containerID="921764daa38d30c83a13d2a947e42297cfa30b6078e72b074ef61e8d7e19fc09" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.967716 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f8prf"] Nov 25 17:52:59 crc 
kubenswrapper[4800]: E1125 17:52:59.968561 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="extract-content" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.968576 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="extract-content" Nov 25 17:52:59 crc kubenswrapper[4800]: E1125 17:52:59.968601 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="registry-server" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.968611 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="registry-server" Nov 25 17:52:59 crc kubenswrapper[4800]: E1125 17:52:59.968640 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="extract-content" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.968648 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="extract-content" Nov 25 17:52:59 crc kubenswrapper[4800]: E1125 17:52:59.968671 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="registry-server" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.968678 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="registry-server" Nov 25 17:52:59 crc kubenswrapper[4800]: E1125 17:52:59.968699 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="extract-utilities" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.968708 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="extract-utilities" Nov 25 17:52:59 crc kubenswrapper[4800]: E1125 17:52:59.968725 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="extract-utilities" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.968733 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="extract-utilities" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.968974 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="474f1601-c059-4d45-a559-71caf6d3d05b" containerName="registry-server" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.969004 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3a0c059-60af-49d9-b072-bac854d85566" containerName="registry-server" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.970526 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.984550 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f8prf"] Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.987429 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-catalog-content\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.987605 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-utilities\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:52:59 crc kubenswrapper[4800]: I1125 17:52:59.987702 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hq2hb\" (UniqueName: \"kubernetes.io/projected/5c29227f-b697-483a-8fe7-414e58bc7c6c-kube-api-access-hq2hb\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.094927 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-utilities\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.095001 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hq2hb\" (UniqueName: \"kubernetes.io/projected/5c29227f-b697-483a-8fe7-414e58bc7c6c-kube-api-access-hq2hb\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.095130 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-catalog-content\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.095685 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-catalog-content\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.096163 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-utilities\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.114916 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hq2hb\" (UniqueName: \"kubernetes.io/projected/5c29227f-b697-483a-8fe7-414e58bc7c6c-kube-api-access-hq2hb\") pod \"certified-operators-f8prf\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.316096 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:00 crc kubenswrapper[4800]: I1125 17:53:00.679789 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f8prf"] Nov 25 17:53:00 crc kubenswrapper[4800]: W1125 17:53:00.693902 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c29227f_b697_483a_8fe7_414e58bc7c6c.slice/crio-5f2a83a75e21127a13f1f94daca5dd1032a9a15b724ee14cc6f0515ab028ecdd WatchSource:0}: Error finding container 5f2a83a75e21127a13f1f94daca5dd1032a9a15b724ee14cc6f0515ab028ecdd: Status 404 returned error can't find the container with id 5f2a83a75e21127a13f1f94daca5dd1032a9a15b724ee14cc6f0515ab028ecdd Nov 25 17:53:01 crc kubenswrapper[4800]: I1125 17:53:01.420327 4800 generic.go:334] "Generic (PLEG): container finished" podID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerID="ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d" exitCode=0 Nov 25 17:53:01 crc kubenswrapper[4800]: I1125 17:53:01.420444 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f8prf" event={"ID":"5c29227f-b697-483a-8fe7-414e58bc7c6c","Type":"ContainerDied","Data":"ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d"} Nov 25 17:53:01 crc kubenswrapper[4800]: I1125 17:53:01.421945 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f8prf" event={"ID":"5c29227f-b697-483a-8fe7-414e58bc7c6c","Type":"ContainerStarted","Data":"5f2a83a75e21127a13f1f94daca5dd1032a9a15b724ee14cc6f0515ab028ecdd"} Nov 25 17:53:03 crc kubenswrapper[4800]: I1125 17:53:03.455977 4800 generic.go:334] "Generic (PLEG): container finished" podID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerID="6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6" exitCode=0 Nov 25 17:53:03 crc kubenswrapper[4800]: I1125 17:53:03.456072 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f8prf" event={"ID":"5c29227f-b697-483a-8fe7-414e58bc7c6c","Type":"ContainerDied","Data":"6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6"} Nov 25 17:53:04 crc kubenswrapper[4800]: I1125 17:53:04.477078 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f8prf" event={"ID":"5c29227f-b697-483a-8fe7-414e58bc7c6c","Type":"ContainerStarted","Data":"827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7"} Nov 25 17:53:04 crc kubenswrapper[4800]: I1125 17:53:04.507491 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f8prf" podStartSLOduration=3.053113658 podStartE2EDuration="5.507454106s" podCreationTimestamp="2025-11-25 17:52:59 +0000 UTC" firstStartedPulling="2025-11-25 17:53:01.422615424 +0000 UTC m=+9342.477023906" lastFinishedPulling="2025-11-25 17:53:03.876955862 +0000 UTC m=+9344.931364354" observedRunningTime="2025-11-25 17:53:04.496445627 +0000 UTC 
m=+9345.550854139" watchObservedRunningTime="2025-11-25 17:53:04.507454106 +0000 UTC m=+9345.561862628" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.353592 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dmfsp"] Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.357509 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.385817 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmfsp"] Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.535427 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8qxj\" (UniqueName: \"kubernetes.io/projected/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-kube-api-access-g8qxj\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.535750 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-utilities\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.536088 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-catalog-content\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.637753 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8qxj\" (UniqueName: \"kubernetes.io/projected/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-kube-api-access-g8qxj\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.637965 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-utilities\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.638083 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-catalog-content\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.638523 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-utilities\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.638690 4800 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-catalog-content\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.660059 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8qxj\" (UniqueName: \"kubernetes.io/projected/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-kube-api-access-g8qxj\") pod \"redhat-marketplace-dmfsp\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:06 crc kubenswrapper[4800]: I1125 17:53:06.678330 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:07 crc kubenswrapper[4800]: I1125 17:53:07.189218 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmfsp"] Nov 25 17:53:08 crc kubenswrapper[4800]: I1125 17:53:08.527793 4800 generic.go:334] "Generic (PLEG): container finished" podID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerID="74af85432ad5b0b6e1becf176b9f636a8cc616d8e66c8ed2ea6136029dc22b41" exitCode=0 Nov 25 17:53:08 crc kubenswrapper[4800]: I1125 17:53:08.528125 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmfsp" event={"ID":"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae","Type":"ContainerDied","Data":"74af85432ad5b0b6e1becf176b9f636a8cc616d8e66c8ed2ea6136029dc22b41"} Nov 25 17:53:08 crc kubenswrapper[4800]: I1125 17:53:08.528153 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmfsp" event={"ID":"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae","Type":"ContainerStarted","Data":"b23b0e763d35eecdbcbf0e450040aa00dd5dad6a102a581ace5d206848ff2a09"} Nov 25 17:53:09 crc kubenswrapper[4800]: I1125 17:53:09.540643 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmfsp" event={"ID":"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae","Type":"ContainerStarted","Data":"57e8265c0c51b80303aff984f3525c84fd0d7d45d67cea04c23b39d0f7cb0401"} Nov 25 17:53:10 crc kubenswrapper[4800]: I1125 17:53:10.317230 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:10 crc kubenswrapper[4800]: I1125 17:53:10.317686 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:10 crc kubenswrapper[4800]: I1125 17:53:10.405673 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:10 crc kubenswrapper[4800]: I1125 17:53:10.554263 4800 generic.go:334] "Generic (PLEG): container finished" podID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerID="57e8265c0c51b80303aff984f3525c84fd0d7d45d67cea04c23b39d0f7cb0401" exitCode=0 Nov 25 17:53:10 crc kubenswrapper[4800]: I1125 17:53:10.554370 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmfsp" event={"ID":"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae","Type":"ContainerDied","Data":"57e8265c0c51b80303aff984f3525c84fd0d7d45d67cea04c23b39d0f7cb0401"} Nov 25 17:53:10 crc kubenswrapper[4800]: I1125 17:53:10.623530 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:12 crc kubenswrapper[4800]: I1125 17:53:12.343485 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f8prf"] Nov 25 17:53:12 crc kubenswrapper[4800]: I1125 17:53:12.585835 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmfsp" event={"ID":"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae","Type":"ContainerStarted","Data":"26519a207066f7fb391d42267ab8d8455950c65411c873c02623b95ea3fe5a21"} Nov 25 17:53:12 crc kubenswrapper[4800]: I1125 17:53:12.614434 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dmfsp" podStartSLOduration=3.671771073 podStartE2EDuration="6.614408205s" podCreationTimestamp="2025-11-25 17:53:06 +0000 UTC" firstStartedPulling="2025-11-25 17:53:08.53105944 +0000 UTC m=+9349.585467962" lastFinishedPulling="2025-11-25 17:53:11.473696572 +0000 UTC m=+9352.528105094" observedRunningTime="2025-11-25 17:53:12.600065465 +0000 UTC m=+9353.654473967" watchObservedRunningTime="2025-11-25 17:53:12.614408205 +0000 UTC m=+9353.668816727" Nov 25 17:53:13 crc kubenswrapper[4800]: I1125 17:53:13.594746 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f8prf" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="registry-server" containerID="cri-o://827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7" gracePeriod=2 Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.130505 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.316221 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hq2hb\" (UniqueName: \"kubernetes.io/projected/5c29227f-b697-483a-8fe7-414e58bc7c6c-kube-api-access-hq2hb\") pod \"5c29227f-b697-483a-8fe7-414e58bc7c6c\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.316348 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-catalog-content\") pod \"5c29227f-b697-483a-8fe7-414e58bc7c6c\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.316495 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-utilities\") pod \"5c29227f-b697-483a-8fe7-414e58bc7c6c\" (UID: \"5c29227f-b697-483a-8fe7-414e58bc7c6c\") " Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.317322 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-utilities" (OuterVolumeSpecName: "utilities") pod "5c29227f-b697-483a-8fe7-414e58bc7c6c" (UID: "5c29227f-b697-483a-8fe7-414e58bc7c6c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.323323 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c29227f-b697-483a-8fe7-414e58bc7c6c-kube-api-access-hq2hb" (OuterVolumeSpecName: "kube-api-access-hq2hb") pod "5c29227f-b697-483a-8fe7-414e58bc7c6c" (UID: "5c29227f-b697-483a-8fe7-414e58bc7c6c"). InnerVolumeSpecName "kube-api-access-hq2hb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.373426 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c29227f-b697-483a-8fe7-414e58bc7c6c" (UID: "5c29227f-b697-483a-8fe7-414e58bc7c6c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.420141 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hq2hb\" (UniqueName: \"kubernetes.io/projected/5c29227f-b697-483a-8fe7-414e58bc7c6c-kube-api-access-hq2hb\") on node \"crc\" DevicePath \"\"" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.420173 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.420184 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c29227f-b697-483a-8fe7-414e58bc7c6c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.611418 4800 generic.go:334] "Generic (PLEG): container finished" podID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerID="827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7" exitCode=0 Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.611478 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f8prf" event={"ID":"5c29227f-b697-483a-8fe7-414e58bc7c6c","Type":"ContainerDied","Data":"827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7"} Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.611546 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f8prf" event={"ID":"5c29227f-b697-483a-8fe7-414e58bc7c6c","Type":"ContainerDied","Data":"5f2a83a75e21127a13f1f94daca5dd1032a9a15b724ee14cc6f0515ab028ecdd"} Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.611578 4800 scope.go:117] "RemoveContainer" containerID="827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.613242 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f8prf" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.649037 4800 scope.go:117] "RemoveContainer" containerID="6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.681772 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f8prf"] Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.691396 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f8prf"] Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.716037 4800 scope.go:117] "RemoveContainer" containerID="ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.751211 4800 scope.go:117] "RemoveContainer" containerID="827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7" Nov 25 17:53:14 crc kubenswrapper[4800]: E1125 17:53:14.751966 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7\": container with ID starting with 827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7 not found: ID does not exist" containerID="827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.752018 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7"} err="failed to get container status \"827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7\": rpc error: code = NotFound desc = could not find container \"827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7\": container with ID starting with 827678b43276a38b92c72cdda2386eb0a9e2bd746d0837b02d2707f6eae08fd7 not found: ID does not exist" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.752054 4800 scope.go:117] "RemoveContainer" containerID="6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6" Nov 25 17:53:14 crc kubenswrapper[4800]: E1125 17:53:14.752407 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6\": container with ID starting with 6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6 not found: ID does not exist" containerID="6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.752451 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6"} err="failed to get container status \"6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6\": rpc error: code = NotFound desc = could not find container \"6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6\": container with ID starting with 6009d18e8a8b1db598e5916522dbcf983ddc0b6a225d604770880adfdf2de2a6 not found: ID does not exist" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.752476 4800 scope.go:117] "RemoveContainer" containerID="ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d" Nov 25 17:53:14 crc kubenswrapper[4800]: E1125 17:53:14.752976 4800 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d\": container with ID starting with ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d not found: ID does not exist" containerID="ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d" Nov 25 17:53:14 crc kubenswrapper[4800]: I1125 17:53:14.753020 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d"} err="failed to get container status \"ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d\": rpc error: code = NotFound desc = could not find container \"ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d\": container with ID starting with ad5d9c5bd6d0d42712678e36caade7e8ff27d6a5900594323d6a8aa8d216da7d not found: ID does not exist" Nov 25 17:53:15 crc kubenswrapper[4800]: I1125 17:53:15.798174 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" path="/var/lib/kubelet/pods/5c29227f-b697-483a-8fe7-414e58bc7c6c/volumes" Nov 25 17:53:16 crc kubenswrapper[4800]: I1125 17:53:16.678946 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:16 crc kubenswrapper[4800]: I1125 17:53:16.679590 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:16 crc kubenswrapper[4800]: I1125 17:53:16.762497 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:17 crc kubenswrapper[4800]: I1125 17:53:17.721072 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:18 crc kubenswrapper[4800]: I1125 17:53:18.743355 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmfsp"] Nov 25 17:53:20 crc kubenswrapper[4800]: I1125 17:53:20.679987 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dmfsp" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="registry-server" containerID="cri-o://26519a207066f7fb391d42267ab8d8455950c65411c873c02623b95ea3fe5a21" gracePeriod=2 Nov 25 17:53:21 crc kubenswrapper[4800]: I1125 17:53:21.699178 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmfsp" event={"ID":"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae","Type":"ContainerDied","Data":"26519a207066f7fb391d42267ab8d8455950c65411c873c02623b95ea3fe5a21"} Nov 25 17:53:21 crc kubenswrapper[4800]: I1125 17:53:21.699117 4800 generic.go:334] "Generic (PLEG): container finished" podID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerID="26519a207066f7fb391d42267ab8d8455950c65411c873c02623b95ea3fe5a21" exitCode=0 Nov 25 17:53:21 crc kubenswrapper[4800]: I1125 17:53:21.998716 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.101205 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8qxj\" (UniqueName: \"kubernetes.io/projected/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-kube-api-access-g8qxj\") pod \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.101350 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-catalog-content\") pod \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.101503 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-utilities\") pod \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\" (UID: \"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae\") " Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.103013 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-utilities" (OuterVolumeSpecName: "utilities") pod "038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" (UID: "038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.136709 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" (UID: "038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.164137 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-kube-api-access-g8qxj" (OuterVolumeSpecName: "kube-api-access-g8qxj") pod "038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" (UID: "038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae"). InnerVolumeSpecName "kube-api-access-g8qxj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.204144 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.204212 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8qxj\" (UniqueName: \"kubernetes.io/projected/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-kube-api-access-g8qxj\") on node \"crc\" DevicePath \"\"" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.204243 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.714640 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmfsp" event={"ID":"038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae","Type":"ContainerDied","Data":"b23b0e763d35eecdbcbf0e450040aa00dd5dad6a102a581ace5d206848ff2a09"} Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.714695 4800 scope.go:117] "RemoveContainer" containerID="26519a207066f7fb391d42267ab8d8455950c65411c873c02623b95ea3fe5a21" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.714720 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmfsp" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.760927 4800 scope.go:117] "RemoveContainer" containerID="57e8265c0c51b80303aff984f3525c84fd0d7d45d67cea04c23b39d0f7cb0401" Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.768066 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmfsp"] Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.779228 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmfsp"] Nov 25 17:53:22 crc kubenswrapper[4800]: I1125 17:53:22.821371 4800 scope.go:117] "RemoveContainer" containerID="74af85432ad5b0b6e1becf176b9f636a8cc616d8e66c8ed2ea6136029dc22b41" Nov 25 17:53:23 crc kubenswrapper[4800]: I1125 17:53:23.806648 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" path="/var/lib/kubelet/pods/038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae/volumes" Nov 25 17:55:12 crc kubenswrapper[4800]: I1125 17:55:12.639891 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:55:12 crc kubenswrapper[4800]: I1125 17:55:12.640728 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:55:42 crc kubenswrapper[4800]: I1125 17:55:42.639705 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:55:42 crc kubenswrapper[4800]: I1125 17:55:42.640378 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:56:12 crc kubenswrapper[4800]: I1125 17:56:12.640405 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:56:12 crc kubenswrapper[4800]: I1125 17:56:12.641021 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:56:12 crc kubenswrapper[4800]: I1125 17:56:12.641085 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 17:56:12 crc kubenswrapper[4800]: I1125 17:56:12.642190 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:56:12 crc kubenswrapper[4800]: I1125 17:56:12.642286 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" gracePeriod=600 Nov 25 17:56:12 crc kubenswrapper[4800]: E1125 17:56:12.771528 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:56:13 crc kubenswrapper[4800]: I1125 17:56:13.672344 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" exitCode=0 Nov 25 17:56:13 crc kubenswrapper[4800]: I1125 17:56:13.672423 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308"} Nov 25 17:56:13 crc kubenswrapper[4800]: I1125 17:56:13.672471 4800 scope.go:117] "RemoveContainer" containerID="36804d756446b6fef336440010964b334919600cf19b967f42dfb309ecd8ca93" Nov 25 17:56:13 crc kubenswrapper[4800]: I1125 
17:56:13.673218 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:56:13 crc kubenswrapper[4800]: E1125 17:56:13.673798 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:56:25 crc kubenswrapper[4800]: I1125 17:56:25.791440 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:56:25 crc kubenswrapper[4800]: E1125 17:56:25.792731 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:56:36 crc kubenswrapper[4800]: I1125 17:56:36.786621 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:56:36 crc kubenswrapper[4800]: E1125 17:56:36.788543 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:56:50 crc kubenswrapper[4800]: I1125 17:56:50.785569 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:56:50 crc kubenswrapper[4800]: E1125 17:56:50.786675 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:57:04 crc kubenswrapper[4800]: I1125 17:57:04.785733 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:57:04 crc kubenswrapper[4800]: E1125 17:57:04.787089 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:57:07 crc kubenswrapper[4800]: I1125 17:57:07.770712 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="2bcf406e-1184-44de-a565-974dd28d1256" containerName="ceilometer-central-agent" 
probeResult="failure" output="command timed out" Nov 25 17:57:18 crc kubenswrapper[4800]: I1125 17:57:18.786458 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:57:18 crc kubenswrapper[4800]: E1125 17:57:18.787544 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:57:31 crc kubenswrapper[4800]: I1125 17:57:31.790171 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:57:31 crc kubenswrapper[4800]: E1125 17:57:31.793381 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:57:45 crc kubenswrapper[4800]: I1125 17:57:45.785246 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:57:45 crc kubenswrapper[4800]: E1125 17:57:45.786111 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:57:59 crc kubenswrapper[4800]: I1125 17:57:59.802655 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:57:59 crc kubenswrapper[4800]: E1125 17:57:59.804428 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:58:12 crc kubenswrapper[4800]: I1125 17:58:12.786668 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:58:12 crc kubenswrapper[4800]: E1125 17:58:12.787709 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:58:26 crc kubenswrapper[4800]: I1125 17:58:26.786967 4800 scope.go:117] "RemoveContainer" 
containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:58:26 crc kubenswrapper[4800]: E1125 17:58:26.788297 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:58:38 crc kubenswrapper[4800]: I1125 17:58:38.785593 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:58:38 crc kubenswrapper[4800]: E1125 17:58:38.786751 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:58:51 crc kubenswrapper[4800]: I1125 17:58:51.785696 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:58:51 crc kubenswrapper[4800]: E1125 17:58:51.786480 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:59:06 crc kubenswrapper[4800]: I1125 17:59:06.786687 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:59:06 crc kubenswrapper[4800]: E1125 17:59:06.787941 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:59:19 crc kubenswrapper[4800]: I1125 17:59:19.804030 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:59:19 crc kubenswrapper[4800]: E1125 17:59:19.805497 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:59:33 crc kubenswrapper[4800]: I1125 17:59:33.788279 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:59:33 crc kubenswrapper[4800]: E1125 17:59:33.789601 4800 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 17:59:47 crc kubenswrapper[4800]: I1125 17:59:47.785987 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 17:59:47 crc kubenswrapper[4800]: E1125 17:59:47.786914 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.180702 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8"] Nov 25 18:00:00 crc kubenswrapper[4800]: E1125 18:00:00.181933 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.181955 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4800]: E1125 18:00:00.181984 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="extract-utilities" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.181997 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="extract-utilities" Nov 25 18:00:00 crc kubenswrapper[4800]: E1125 18:00:00.182044 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="extract-utilities" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.182058 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="extract-utilities" Nov 25 18:00:00 crc kubenswrapper[4800]: E1125 18:00:00.182095 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="extract-content" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.182107 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="extract-content" Nov 25 18:00:00 crc kubenswrapper[4800]: E1125 18:00:00.182135 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="extract-content" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.182147 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="extract-content" Nov 25 18:00:00 crc kubenswrapper[4800]: E1125 18:00:00.182189 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.182204 4800 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.182547 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c29227f-b697-483a-8fe7-414e58bc7c6c" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.182596 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="038f5ee3-0bb3-4f36-b99b-ff8af5e5ceae" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.183635 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.186036 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.186164 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.194385 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8"] Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.328323 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vv2mk\" (UniqueName: \"kubernetes.io/projected/1cc3bac6-955c-4edd-971f-9cc22e34a77e-kube-api-access-vv2mk\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.328428 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cc3bac6-955c-4edd-971f-9cc22e34a77e-config-volume\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.328542 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cc3bac6-955c-4edd-971f-9cc22e34a77e-secret-volume\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.430034 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cc3bac6-955c-4edd-971f-9cc22e34a77e-secret-volume\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.430207 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vv2mk\" (UniqueName: \"kubernetes.io/projected/1cc3bac6-955c-4edd-971f-9cc22e34a77e-kube-api-access-vv2mk\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.430263 
4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cc3bac6-955c-4edd-971f-9cc22e34a77e-config-volume\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.431339 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cc3bac6-955c-4edd-971f-9cc22e34a77e-config-volume\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.446910 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cc3bac6-955c-4edd-971f-9cc22e34a77e-secret-volume\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.452643 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vv2mk\" (UniqueName: \"kubernetes.io/projected/1cc3bac6-955c-4edd-971f-9cc22e34a77e-kube-api-access-vv2mk\") pod \"collect-profiles-29401560-7q6m8\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.517118 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:00 crc kubenswrapper[4800]: I1125 18:00:00.785610 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 18:00:00 crc kubenswrapper[4800]: E1125 18:00:00.786559 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:00:01 crc kubenswrapper[4800]: I1125 18:00:01.000171 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8"] Nov 25 18:00:01 crc kubenswrapper[4800]: W1125 18:00:01.007651 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1cc3bac6_955c_4edd_971f_9cc22e34a77e.slice/crio-2aa3ce286526c9fb222cd3a6e59b4950961141ee3d2871620fb922eb773dde6b WatchSource:0}: Error finding container 2aa3ce286526c9fb222cd3a6e59b4950961141ee3d2871620fb922eb773dde6b: Status 404 returned error can't find the container with id 2aa3ce286526c9fb222cd3a6e59b4950961141ee3d2871620fb922eb773dde6b Nov 25 18:00:01 crc kubenswrapper[4800]: I1125 18:00:01.345788 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" 
event={"ID":"1cc3bac6-955c-4edd-971f-9cc22e34a77e","Type":"ContainerStarted","Data":"2fb319427e53108dfd8c557dde7436ff036473554dbc26680b25c192b65bdf89"} Nov 25 18:00:01 crc kubenswrapper[4800]: I1125 18:00:01.346114 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" event={"ID":"1cc3bac6-955c-4edd-971f-9cc22e34a77e","Type":"ContainerStarted","Data":"2aa3ce286526c9fb222cd3a6e59b4950961141ee3d2871620fb922eb773dde6b"} Nov 25 18:00:01 crc kubenswrapper[4800]: I1125 18:00:01.371907 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" podStartSLOduration=1.371884844 podStartE2EDuration="1.371884844s" podCreationTimestamp="2025-11-25 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:00:01.364636657 +0000 UTC m=+9762.419045179" watchObservedRunningTime="2025-11-25 18:00:01.371884844 +0000 UTC m=+9762.426293326" Nov 25 18:00:02 crc kubenswrapper[4800]: I1125 18:00:02.358678 4800 generic.go:334] "Generic (PLEG): container finished" podID="1cc3bac6-955c-4edd-971f-9cc22e34a77e" containerID="2fb319427e53108dfd8c557dde7436ff036473554dbc26680b25c192b65bdf89" exitCode=0 Nov 25 18:00:02 crc kubenswrapper[4800]: I1125 18:00:02.358787 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" event={"ID":"1cc3bac6-955c-4edd-971f-9cc22e34a77e","Type":"ContainerDied","Data":"2fb319427e53108dfd8c557dde7436ff036473554dbc26680b25c192b65bdf89"} Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.724127 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.799818 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cc3bac6-955c-4edd-971f-9cc22e34a77e-config-volume\") pod \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.799897 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vv2mk\" (UniqueName: \"kubernetes.io/projected/1cc3bac6-955c-4edd-971f-9cc22e34a77e-kube-api-access-vv2mk\") pod \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.800091 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cc3bac6-955c-4edd-971f-9cc22e34a77e-secret-volume\") pod \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\" (UID: \"1cc3bac6-955c-4edd-971f-9cc22e34a77e\") " Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.800873 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1cc3bac6-955c-4edd-971f-9cc22e34a77e-config-volume" (OuterVolumeSpecName: "config-volume") pod "1cc3bac6-955c-4edd-971f-9cc22e34a77e" (UID: "1cc3bac6-955c-4edd-971f-9cc22e34a77e"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.801750 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cc3bac6-955c-4edd-971f-9cc22e34a77e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.805870 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cc3bac6-955c-4edd-971f-9cc22e34a77e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1cc3bac6-955c-4edd-971f-9cc22e34a77e" (UID: "1cc3bac6-955c-4edd-971f-9cc22e34a77e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.808978 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cc3bac6-955c-4edd-971f-9cc22e34a77e-kube-api-access-vv2mk" (OuterVolumeSpecName: "kube-api-access-vv2mk") pod "1cc3bac6-955c-4edd-971f-9cc22e34a77e" (UID: "1cc3bac6-955c-4edd-971f-9cc22e34a77e"). InnerVolumeSpecName "kube-api-access-vv2mk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.903820 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vv2mk\" (UniqueName: \"kubernetes.io/projected/1cc3bac6-955c-4edd-971f-9cc22e34a77e-kube-api-access-vv2mk\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:03 crc kubenswrapper[4800]: I1125 18:00:03.903884 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cc3bac6-955c-4edd-971f-9cc22e34a77e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:04 crc kubenswrapper[4800]: I1125 18:00:04.379251 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" event={"ID":"1cc3bac6-955c-4edd-971f-9cc22e34a77e","Type":"ContainerDied","Data":"2aa3ce286526c9fb222cd3a6e59b4950961141ee3d2871620fb922eb773dde6b"} Nov 25 18:00:04 crc kubenswrapper[4800]: I1125 18:00:04.379616 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2aa3ce286526c9fb222cd3a6e59b4950961141ee3d2871620fb922eb773dde6b" Nov 25 18:00:04 crc kubenswrapper[4800]: I1125 18:00:04.379324 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-7q6m8" Nov 25 18:00:04 crc kubenswrapper[4800]: I1125 18:00:04.473462 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"] Nov 25 18:00:04 crc kubenswrapper[4800]: I1125 18:00:04.485349 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-blrcr"] Nov 25 18:00:05 crc kubenswrapper[4800]: I1125 18:00:05.808399 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8c31fb7-bab0-4f45-910d-9c5612b0f83e" path="/var/lib/kubelet/pods/f8c31fb7-bab0-4f45-910d-9c5612b0f83e/volumes" Nov 25 18:00:13 crc kubenswrapper[4800]: I1125 18:00:13.785367 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 18:00:13 crc kubenswrapper[4800]: E1125 18:00:13.786367 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:00:27 crc kubenswrapper[4800]: I1125 18:00:27.785330 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 18:00:27 crc kubenswrapper[4800]: E1125 18:00:27.786280 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:00:34 crc kubenswrapper[4800]: I1125 18:00:34.719734 4800 scope.go:117] "RemoveContainer" containerID="cc4a10c0c81792eb69432245bf7ccb5d799f6f4fc6693e6bb774bea230918ef8" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.683197 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4kmt9"] Nov 25 18:00:38 crc kubenswrapper[4800]: E1125 18:00:38.684380 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc3bac6-955c-4edd-971f-9cc22e34a77e" containerName="collect-profiles" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.684397 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc3bac6-955c-4edd-971f-9cc22e34a77e" containerName="collect-profiles" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.684601 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cc3bac6-955c-4edd-971f-9cc22e34a77e" containerName="collect-profiles" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.688426 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.704934 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4kmt9"] Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.756544 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-catalog-content\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.756622 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-utilities\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.756655 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sc56\" (UniqueName: \"kubernetes.io/projected/b59e850c-79be-42da-8d63-05cdd906fe0d-kube-api-access-9sc56\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.858469 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-catalog-content\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.858540 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-utilities\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.858575 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sc56\" (UniqueName: \"kubernetes.io/projected/b59e850c-79be-42da-8d63-05cdd906fe0d-kube-api-access-9sc56\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.859033 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-catalog-content\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.859046 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-utilities\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:38 crc kubenswrapper[4800]: I1125 18:00:38.880799 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9sc56\" (UniqueName: \"kubernetes.io/projected/b59e850c-79be-42da-8d63-05cdd906fe0d-kube-api-access-9sc56\") pod \"community-operators-4kmt9\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") " pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:39 crc kubenswrapper[4800]: I1125 18:00:39.032466 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:39 crc kubenswrapper[4800]: I1125 18:00:39.569158 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4kmt9"] Nov 25 18:00:39 crc kubenswrapper[4800]: I1125 18:00:39.813190 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerStarted","Data":"0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41"} Nov 25 18:00:39 crc kubenswrapper[4800]: I1125 18:00:39.814315 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerStarted","Data":"a37804e29ec030ead65f7be2d17246b1c793ecc388fcf9cf9e14d1ff3da0fade"} Nov 25 18:00:40 crc kubenswrapper[4800]: I1125 18:00:40.856027 4800 generic.go:334] "Generic (PLEG): container finished" podID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerID="0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41" exitCode=0 Nov 25 18:00:40 crc kubenswrapper[4800]: I1125 18:00:40.856817 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerDied","Data":"0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41"} Nov 25 18:00:40 crc kubenswrapper[4800]: I1125 18:00:40.861556 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:00:41 crc kubenswrapper[4800]: I1125 18:00:41.785650 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 18:00:41 crc kubenswrapper[4800]: E1125 18:00:41.786199 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:00:42 crc kubenswrapper[4800]: I1125 18:00:42.882231 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerStarted","Data":"cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0"} Nov 25 18:00:43 crc kubenswrapper[4800]: I1125 18:00:43.894536 4800 generic.go:334] "Generic (PLEG): container finished" podID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerID="cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0" exitCode=0 Nov 25 18:00:43 crc kubenswrapper[4800]: I1125 18:00:43.894579 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" 
event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerDied","Data":"cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0"} Nov 25 18:00:45 crc kubenswrapper[4800]: I1125 18:00:45.927734 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerStarted","Data":"4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d"} Nov 25 18:00:45 crc kubenswrapper[4800]: I1125 18:00:45.950030 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4kmt9" podStartSLOduration=3.937443378 podStartE2EDuration="7.950015713s" podCreationTimestamp="2025-11-25 18:00:38 +0000 UTC" firstStartedPulling="2025-11-25 18:00:40.861359711 +0000 UTC m=+9801.915768193" lastFinishedPulling="2025-11-25 18:00:44.873932046 +0000 UTC m=+9805.928340528" observedRunningTime="2025-11-25 18:00:45.945157791 +0000 UTC m=+9806.999566273" watchObservedRunningTime="2025-11-25 18:00:45.950015713 +0000 UTC m=+9807.004424185" Nov 25 18:00:49 crc kubenswrapper[4800]: I1125 18:00:49.032891 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:49 crc kubenswrapper[4800]: I1125 18:00:49.033476 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:49 crc kubenswrapper[4800]: I1125 18:00:49.081464 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.147767 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sm76m"] Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.150655 4800 util.go:30] "No sandbox for pod can be found. 
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.160378 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sm76m"]
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.235571 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-utilities\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.235694 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzb8f\" (UniqueName: \"kubernetes.io/projected/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-kube-api-access-lzb8f\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.235722 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-catalog-content\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.337899 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-utilities\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.338049 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzb8f\" (UniqueName: \"kubernetes.io/projected/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-kube-api-access-lzb8f\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.338093 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-catalog-content\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.338508 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-utilities\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.338590 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-catalog-content\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.371187 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzb8f\" (UniqueName: \"kubernetes.io/projected/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-kube-api-access-lzb8f\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m"
\"kube-api-access-lzb8f\" (UniqueName: \"kubernetes.io/projected/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-kube-api-access-lzb8f\") pod \"redhat-operators-sm76m\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") " pod="openshift-marketplace/redhat-operators-sm76m" Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.489265 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sm76m" Nov 25 18:00:54 crc kubenswrapper[4800]: I1125 18:00:54.785581 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 18:00:54 crc kubenswrapper[4800]: E1125 18:00:54.786093 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:00:55 crc kubenswrapper[4800]: I1125 18:00:55.033470 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sm76m"] Nov 25 18:00:56 crc kubenswrapper[4800]: I1125 18:00:56.026026 4800 generic.go:334] "Generic (PLEG): container finished" podID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerID="318711e288cd75b97cc11303f2d6855bdba1c3b83f015ea0e18da78dcabeefd5" exitCode=0 Nov 25 18:00:56 crc kubenswrapper[4800]: I1125 18:00:56.026114 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sm76m" event={"ID":"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0","Type":"ContainerDied","Data":"318711e288cd75b97cc11303f2d6855bdba1c3b83f015ea0e18da78dcabeefd5"} Nov 25 18:00:56 crc kubenswrapper[4800]: I1125 18:00:56.026911 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sm76m" event={"ID":"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0","Type":"ContainerStarted","Data":"ee11ee1d3191490f3016491351b2408d2459598f93c52bdb947b9f58064c16fa"} Nov 25 18:00:58 crc kubenswrapper[4800]: I1125 18:00:58.050931 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sm76m" event={"ID":"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0","Type":"ContainerStarted","Data":"085f0de44d09a1a1561565167595572cd98a17db361801009f6f77d2c0049716"} Nov 25 18:00:59 crc kubenswrapper[4800]: I1125 18:00:59.073376 4800 generic.go:334] "Generic (PLEG): container finished" podID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerID="085f0de44d09a1a1561565167595572cd98a17db361801009f6f77d2c0049716" exitCode=0 Nov 25 18:00:59 crc kubenswrapper[4800]: I1125 18:00:59.073664 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sm76m" event={"ID":"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0","Type":"ContainerDied","Data":"085f0de44d09a1a1561565167595572cd98a17db361801009f6f77d2c0049716"} Nov 25 18:00:59 crc kubenswrapper[4800]: I1125 18:00:59.116306 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4kmt9" Nov 25 18:00:59 crc kubenswrapper[4800]: I1125 18:00:59.507443 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4kmt9"] Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.087615 4800 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/redhat-operators-sm76m" event={"ID":"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0","Type":"ContainerStarted","Data":"c09e028b6758c66fb4cc32af7505a09efd16bf1670b9268a573ab19cbb4896d4"} Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.087721 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4kmt9" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerName="registry-server" containerID="cri-o://4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d" gracePeriod=2 Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.114182 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sm76m" podStartSLOduration=2.480330938 podStartE2EDuration="6.114161482s" podCreationTimestamp="2025-11-25 18:00:54 +0000 UTC" firstStartedPulling="2025-11-25 18:00:56.029965753 +0000 UTC m=+9817.084374265" lastFinishedPulling="2025-11-25 18:00:59.663796297 +0000 UTC m=+9820.718204809" observedRunningTime="2025-11-25 18:01:00.108459537 +0000 UTC m=+9821.162868029" watchObservedRunningTime="2025-11-25 18:01:00.114161482 +0000 UTC m=+9821.168569974" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.170110 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401561-9hv22"] Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.171382 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.184030 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401561-9hv22"] Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.258317 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxspw\" (UniqueName: \"kubernetes.io/projected/6a98967c-9e7a-4ada-8c02-da06d025d5c2-kube-api-access-qxspw\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.258459 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-config-data\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.258517 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-fernet-keys\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.258537 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-combined-ca-bundle\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.359958 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxspw\" (UniqueName: 
\"kubernetes.io/projected/6a98967c-9e7a-4ada-8c02-da06d025d5c2-kube-api-access-qxspw\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.360135 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-config-data\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.360204 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-fernet-keys\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.360228 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-combined-ca-bundle\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.367999 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-config-data\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.370577 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-fernet-keys\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.376184 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-combined-ca-bundle\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.376335 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxspw\" (UniqueName: \"kubernetes.io/projected/6a98967c-9e7a-4ada-8c02-da06d025d5c2-kube-api-access-qxspw\") pod \"keystone-cron-29401561-9hv22\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.510546 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.525084 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.671836 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9sc56\" (UniqueName: \"kubernetes.io/projected/b59e850c-79be-42da-8d63-05cdd906fe0d-kube-api-access-9sc56\") pod \"b59e850c-79be-42da-8d63-05cdd906fe0d\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") "
Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.672211 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-catalog-content\") pod \"b59e850c-79be-42da-8d63-05cdd906fe0d\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") "
Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.672346 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-utilities\") pod \"b59e850c-79be-42da-8d63-05cdd906fe0d\" (UID: \"b59e850c-79be-42da-8d63-05cdd906fe0d\") "
Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.673770 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-utilities" (OuterVolumeSpecName: "utilities") pod "b59e850c-79be-42da-8d63-05cdd906fe0d" (UID: "b59e850c-79be-42da-8d63-05cdd906fe0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.678117 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b59e850c-79be-42da-8d63-05cdd906fe0d-kube-api-access-9sc56" (OuterVolumeSpecName: "kube-api-access-9sc56") pod "b59e850c-79be-42da-8d63-05cdd906fe0d" (UID: "b59e850c-79be-42da-8d63-05cdd906fe0d"). InnerVolumeSpecName "kube-api-access-9sc56". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.740352 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b59e850c-79be-42da-8d63-05cdd906fe0d" (UID: "b59e850c-79be-42da-8d63-05cdd906fe0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.774291 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.774319 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9sc56\" (UniqueName: \"kubernetes.io/projected/b59e850c-79be-42da-8d63-05cdd906fe0d-kube-api-access-9sc56\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:00 crc kubenswrapper[4800]: I1125 18:01:00.774332 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59e850c-79be-42da-8d63-05cdd906fe0d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.058462 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401561-9hv22"] Nov 25 18:01:01 crc kubenswrapper[4800]: W1125 18:01:01.069796 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a98967c_9e7a_4ada_8c02_da06d025d5c2.slice/crio-112a1c1b83a69047f0d2bd81f7b4057722e68c506505c54aa777bf0f96b70489 WatchSource:0}: Error finding container 112a1c1b83a69047f0d2bd81f7b4057722e68c506505c54aa777bf0f96b70489: Status 404 returned error can't find the container with id 112a1c1b83a69047f0d2bd81f7b4057722e68c506505c54aa777bf0f96b70489 Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.097909 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-9hv22" event={"ID":"6a98967c-9e7a-4ada-8c02-da06d025d5c2","Type":"ContainerStarted","Data":"112a1c1b83a69047f0d2bd81f7b4057722e68c506505c54aa777bf0f96b70489"} Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.101410 4800 generic.go:334] "Generic (PLEG): container finished" podID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerID="4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d" exitCode=0 Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.102339 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.104028 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerDied","Data":"4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d"}
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.104107 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kmt9" event={"ID":"b59e850c-79be-42da-8d63-05cdd906fe0d","Type":"ContainerDied","Data":"a37804e29ec030ead65f7be2d17246b1c793ecc388fcf9cf9e14d1ff3da0fade"}
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.104140 4800 scope.go:117] "RemoveContainer" containerID="4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.233225 4800 scope.go:117] "RemoveContainer" containerID="cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.270567 4800 scope.go:117] "RemoveContainer" containerID="0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.275125 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4kmt9"]
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.283294 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4kmt9"]
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.336937 4800 scope.go:117] "RemoveContainer" containerID="4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d"
Nov 25 18:01:01 crc kubenswrapper[4800]: E1125 18:01:01.337513 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d\": container with ID starting with 4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d not found: ID does not exist" containerID="4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.337567 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d"} err="failed to get container status \"4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d\": rpc error: code = NotFound desc = could not find container \"4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d\": container with ID starting with 4ecc54976dac03755b46972a2d0e91e649123b96dbb4c90fd20515f027784e0d not found: ID does not exist"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.337596 4800 scope.go:117] "RemoveContainer" containerID="cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0"
Nov 25 18:01:01 crc kubenswrapper[4800]: E1125 18:01:01.337994 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0\": container with ID starting with cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0 not found: ID does not exist" containerID="cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.338029 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0"} err="failed to get container status \"cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0\": rpc error: code = NotFound desc = could not find container \"cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0\": container with ID starting with cd6b5dedd6421a7946bc7b64483aa035e5dd15d1b94425838891a083a5adf9c0 not found: ID does not exist"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.338047 4800 scope.go:117] "RemoveContainer" containerID="0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41"
Nov 25 18:01:01 crc kubenswrapper[4800]: E1125 18:01:01.338413 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41\": container with ID starting with 0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41 not found: ID does not exist" containerID="0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.338503 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41"} err="failed to get container status \"0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41\": rpc error: code = NotFound desc = could not find container \"0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41\": container with ID starting with 0bb9de8ee163afc90c08150eb535fb3190efe13da401c1fda6f31805869eca41 not found: ID does not exist"
Nov 25 18:01:01 crc kubenswrapper[4800]: I1125 18:01:01.799455 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" path="/var/lib/kubelet/pods/b59e850c-79be-42da-8d63-05cdd906fe0d/volumes"
Nov 25 18:01:02 crc kubenswrapper[4800]: I1125 18:01:02.112174 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-9hv22" event={"ID":"6a98967c-9e7a-4ada-8c02-da06d025d5c2","Type":"ContainerStarted","Data":"a921e3e613b74092629e818235d10341e39a43b230431b99e5a2ea106d2a75c9"}
Nov 25 18:01:02 crc kubenswrapper[4800]: I1125 18:01:02.133138 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401561-9hv22" podStartSLOduration=2.133112413 podStartE2EDuration="2.133112413s" podCreationTimestamp="2025-11-25 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:01:02.131778726 +0000 UTC m=+9823.186187208" watchObservedRunningTime="2025-11-25 18:01:02.133112413 +0000 UTC m=+9823.187520905"
Nov 25 18:01:04 crc kubenswrapper[4800]: I1125 18:01:04.129703 4800 generic.go:334] "Generic (PLEG): container finished" podID="6a98967c-9e7a-4ada-8c02-da06d025d5c2" containerID="a921e3e613b74092629e818235d10341e39a43b230431b99e5a2ea106d2a75c9" exitCode=0
Nov 25 18:01:04 crc kubenswrapper[4800]: I1125 18:01:04.129806 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-9hv22" event={"ID":"6a98967c-9e7a-4ada-8c02-da06d025d5c2","Type":"ContainerDied","Data":"a921e3e613b74092629e818235d10341e39a43b230431b99e5a2ea106d2a75c9"}
Nov 25 18:01:04 crc kubenswrapper[4800]: I1125 18:01:04.490319 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sm76m"
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sm76m" Nov 25 18:01:04 crc kubenswrapper[4800]: I1125 18:01:04.491503 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sm76m" Nov 25 18:01:05 crc kubenswrapper[4800]: I1125 18:01:05.465495 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401561-9hv22" Nov 25 18:01:05 crc kubenswrapper[4800]: I1125 18:01:05.554340 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-sm76m" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="registry-server" probeResult="failure" output=< Nov 25 18:01:05 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 18:01:05 crc kubenswrapper[4800]: > Nov 25 18:01:05 crc kubenswrapper[4800]: I1125 18:01:05.595807 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-combined-ca-bundle\") pod \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " Nov 25 18:01:05 crc kubenswrapper[4800]: I1125 18:01:05.595921 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxspw\" (UniqueName: \"kubernetes.io/projected/6a98967c-9e7a-4ada-8c02-da06d025d5c2-kube-api-access-qxspw\") pod \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " Nov 25 18:01:05 crc kubenswrapper[4800]: I1125 18:01:05.595990 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-fernet-keys\") pod \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " Nov 25 18:01:05 crc kubenswrapper[4800]: I1125 18:01:05.596023 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-config-data\") pod \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\" (UID: \"6a98967c-9e7a-4ada-8c02-da06d025d5c2\") " Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.067210 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6a98967c-9e7a-4ada-8c02-da06d025d5c2" (UID: "6a98967c-9e7a-4ada-8c02-da06d025d5c2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.067785 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a98967c-9e7a-4ada-8c02-da06d025d5c2-kube-api-access-qxspw" (OuterVolumeSpecName: "kube-api-access-qxspw") pod "6a98967c-9e7a-4ada-8c02-da06d025d5c2" (UID: "6a98967c-9e7a-4ada-8c02-da06d025d5c2"). InnerVolumeSpecName "kube-api-access-qxspw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.108500 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxspw\" (UniqueName: \"kubernetes.io/projected/6a98967c-9e7a-4ada-8c02-da06d025d5c2-kube-api-access-qxspw\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.108892 4800 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.108989 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a98967c-9e7a-4ada-8c02-da06d025d5c2" (UID: "6a98967c-9e7a-4ada-8c02-da06d025d5c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.131064 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-config-data" (OuterVolumeSpecName: "config-data") pod "6a98967c-9e7a-4ada-8c02-da06d025d5c2" (UID: "6a98967c-9e7a-4ada-8c02-da06d025d5c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.150338 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-9hv22" event={"ID":"6a98967c-9e7a-4ada-8c02-da06d025d5c2","Type":"ContainerDied","Data":"112a1c1b83a69047f0d2bd81f7b4057722e68c506505c54aa777bf0f96b70489"} Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.150374 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="112a1c1b83a69047f0d2bd81f7b4057722e68c506505c54aa777bf0f96b70489" Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.150421 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.210413 4800 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:01:06 crc kubenswrapper[4800]: I1125 18:01:06.210614 4800 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a98967c-9e7a-4ada-8c02-da06d025d5c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:01:07 crc kubenswrapper[4800]: I1125 18:01:07.786014 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308"
Nov 25 18:01:07 crc kubenswrapper[4800]: E1125 18:01:07.786349 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 18:01:15 crc kubenswrapper[4800]: I1125 18:01:15.409885 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:01:15 crc kubenswrapper[4800]: I1125 18:01:15.457201 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:01:15 crc kubenswrapper[4800]: I1125 18:01:15.651961 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sm76m"]
Nov 25 18:01:17 crc kubenswrapper[4800]: I1125 18:01:17.292788 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sm76m" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="registry-server" containerID="cri-o://c09e028b6758c66fb4cc32af7505a09efd16bf1670b9268a573ab19cbb4896d4" gracePeriod=2
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.307818 4800 generic.go:334] "Generic (PLEG): container finished" podID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerID="c09e028b6758c66fb4cc32af7505a09efd16bf1670b9268a573ab19cbb4896d4" exitCode=0
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.307881 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sm76m" event={"ID":"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0","Type":"ContainerDied","Data":"c09e028b6758c66fb4cc32af7505a09efd16bf1670b9268a573ab19cbb4896d4"}
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.675059 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sm76m"
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.792418 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzb8f\" (UniqueName: \"kubernetes.io/projected/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-kube-api-access-lzb8f\") pod \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") "
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.792521 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-utilities\") pod \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") "
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.792663 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-catalog-content\") pod \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\" (UID: \"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0\") "
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.793477 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-utilities" (OuterVolumeSpecName: "utilities") pod "c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" (UID: "c7712f9a-5b4b-43cd-b073-17f2d45fa1d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.798090 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-kube-api-access-lzb8f" (OuterVolumeSpecName: "kube-api-access-lzb8f") pod "c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" (UID: "c7712f9a-5b4b-43cd-b073-17f2d45fa1d0"). InnerVolumeSpecName "kube-api-access-lzb8f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.891219 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" (UID: "c7712f9a-5b4b-43cd-b073-17f2d45fa1d0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.895764 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzb8f\" (UniqueName: \"kubernetes.io/projected/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-kube-api-access-lzb8f\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.895804 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:18 crc kubenswrapper[4800]: I1125 18:01:18.895816 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.316147 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sm76m" event={"ID":"c7712f9a-5b4b-43cd-b073-17f2d45fa1d0","Type":"ContainerDied","Data":"ee11ee1d3191490f3016491351b2408d2459598f93c52bdb947b9f58064c16fa"} Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.316415 4800 scope.go:117] "RemoveContainer" containerID="c09e028b6758c66fb4cc32af7505a09efd16bf1670b9268a573ab19cbb4896d4" Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.316524 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sm76m" Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.348479 4800 scope.go:117] "RemoveContainer" containerID="085f0de44d09a1a1561565167595572cd98a17db361801009f6f77d2c0049716" Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.359910 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sm76m"] Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.367743 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-sm76m"] Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.381413 4800 scope.go:117] "RemoveContainer" containerID="318711e288cd75b97cc11303f2d6855bdba1c3b83f015ea0e18da78dcabeefd5" Nov 25 18:01:19 crc kubenswrapper[4800]: E1125 18:01:19.516240 4800 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7712f9a_5b4b_43cd_b073_17f2d45fa1d0.slice/crio-ee11ee1d3191490f3016491351b2408d2459598f93c52bdb947b9f58064c16fa\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7712f9a_5b4b_43cd_b073_17f2d45fa1d0.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:01:19 crc kubenswrapper[4800]: I1125 18:01:19.812343 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" path="/var/lib/kubelet/pods/c7712f9a-5b4b-43cd-b073-17f2d45fa1d0/volumes" Nov 25 18:01:20 crc kubenswrapper[4800]: I1125 18:01:20.785662 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308" Nov 25 18:01:21 crc kubenswrapper[4800]: I1125 18:01:21.346469 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"44642c89cc2e6e77c59027ec6b690d8a73cbb0fdae2856b19eba18a6aaa5941f"} Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.828495 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-q6rvl"] Nov 25 18:03:21 crc kubenswrapper[4800]: E1125 18:03:21.829953 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a98967c-9e7a-4ada-8c02-da06d025d5c2" containerName="keystone-cron" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.829969 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a98967c-9e7a-4ada-8c02-da06d025d5c2" containerName="keystone-cron" Nov 25 18:03:21 crc kubenswrapper[4800]: E1125 18:03:21.829988 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="registry-server" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.829996 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="registry-server" Nov 25 18:03:21 crc kubenswrapper[4800]: E1125 18:03:21.830018 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="extract-utilities" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.830026 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="extract-utilities" Nov 25 18:03:21 crc kubenswrapper[4800]: E1125 18:03:21.830050 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerName="extract-content" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.830060 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerName="extract-content" Nov 25 18:03:21 crc kubenswrapper[4800]: E1125 18:03:21.830074 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerName="extract-utilities" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.830085 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerName="extract-utilities" Nov 25 18:03:21 crc kubenswrapper[4800]: E1125 18:03:21.830106 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerName="registry-server" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.830113 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b59e850c-79be-42da-8d63-05cdd906fe0d" containerName="registry-server" Nov 25 18:03:21 crc kubenswrapper[4800]: E1125 18:03:21.830131 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="extract-content" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.830138 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="extract-content" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.830393 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a98967c-9e7a-4ada-8c02-da06d025d5c2" containerName="keystone-cron" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.830413 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7712f9a-5b4b-43cd-b073-17f2d45fa1d0" containerName="registry-server" Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 
Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.832248 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.845031 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q6rvl"]
Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.906475 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-utilities\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.906831 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-catalog-content\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:21 crc kubenswrapper[4800]: I1125 18:03:21.908031 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mqjx\" (UniqueName: \"kubernetes.io/projected/63dac0d9-904c-449e-a684-77e569e50a4a-kube-api-access-8mqjx\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.009475 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-utilities\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.009570 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-catalog-content\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.009635 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mqjx\" (UniqueName: \"kubernetes.io/projected/63dac0d9-904c-449e-a684-77e569e50a4a-kube-api-access-8mqjx\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.010151 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-utilities\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.010179 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-catalog-content\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl"
\"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-catalog-content\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl" Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.031986 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mqjx\" (UniqueName: \"kubernetes.io/projected/63dac0d9-904c-449e-a684-77e569e50a4a-kube-api-access-8mqjx\") pod \"redhat-marketplace-q6rvl\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") " pod="openshift-marketplace/redhat-marketplace-q6rvl" Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.188388 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q6rvl" Nov 25 18:03:22 crc kubenswrapper[4800]: I1125 18:03:22.647975 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q6rvl"] Nov 25 18:03:22 crc kubenswrapper[4800]: W1125 18:03:22.653895 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63dac0d9_904c_449e_a684_77e569e50a4a.slice/crio-e29a52d2e8e1e81a1917184e4d6c9562135b2a5cc56d95f8f59d9b87ef110773 WatchSource:0}: Error finding container e29a52d2e8e1e81a1917184e4d6c9562135b2a5cc56d95f8f59d9b87ef110773: Status 404 returned error can't find the container with id e29a52d2e8e1e81a1917184e4d6c9562135b2a5cc56d95f8f59d9b87ef110773 Nov 25 18:03:23 crc kubenswrapper[4800]: I1125 18:03:23.479478 4800 generic.go:334] "Generic (PLEG): container finished" podID="63dac0d9-904c-449e-a684-77e569e50a4a" containerID="9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8" exitCode=0 Nov 25 18:03:23 crc kubenswrapper[4800]: I1125 18:03:23.479894 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q6rvl" event={"ID":"63dac0d9-904c-449e-a684-77e569e50a4a","Type":"ContainerDied","Data":"9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8"} Nov 25 18:03:23 crc kubenswrapper[4800]: I1125 18:03:23.479930 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q6rvl" event={"ID":"63dac0d9-904c-449e-a684-77e569e50a4a","Type":"ContainerStarted","Data":"e29a52d2e8e1e81a1917184e4d6c9562135b2a5cc56d95f8f59d9b87ef110773"} Nov 25 18:03:25 crc kubenswrapper[4800]: I1125 18:03:25.503760 4800 generic.go:334] "Generic (PLEG): container finished" podID="63dac0d9-904c-449e-a684-77e569e50a4a" containerID="441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d" exitCode=0 Nov 25 18:03:25 crc kubenswrapper[4800]: I1125 18:03:25.503811 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q6rvl" event={"ID":"63dac0d9-904c-449e-a684-77e569e50a4a","Type":"ContainerDied","Data":"441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d"} Nov 25 18:03:26 crc kubenswrapper[4800]: I1125 18:03:26.525327 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q6rvl" event={"ID":"63dac0d9-904c-449e-a684-77e569e50a4a","Type":"ContainerStarted","Data":"0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97"} Nov 25 18:03:26 crc kubenswrapper[4800]: I1125 18:03:26.557852 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-q6rvl" podStartSLOduration=3.073045542 
Nov 25 18:03:32 crc kubenswrapper[4800]: I1125 18:03:32.189567 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:32 crc kubenswrapper[4800]: I1125 18:03:32.190306 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:32 crc kubenswrapper[4800]: I1125 18:03:32.288369 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:32 crc kubenswrapper[4800]: I1125 18:03:32.705907 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:32 crc kubenswrapper[4800]: I1125 18:03:32.789168 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q6rvl"]
Nov 25 18:03:34 crc kubenswrapper[4800]: I1125 18:03:34.643576 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-q6rvl" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="registry-server" containerID="cri-o://0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97" gracePeriod=2
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.175218 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q6rvl"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.274087 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mqjx\" (UniqueName: \"kubernetes.io/projected/63dac0d9-904c-449e-a684-77e569e50a4a-kube-api-access-8mqjx\") pod \"63dac0d9-904c-449e-a684-77e569e50a4a\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") "
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.274168 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-catalog-content\") pod \"63dac0d9-904c-449e-a684-77e569e50a4a\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") "
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.275003 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-utilities\") pod \"63dac0d9-904c-449e-a684-77e569e50a4a\" (UID: \"63dac0d9-904c-449e-a684-77e569e50a4a\") "
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.276595 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-utilities" (OuterVolumeSpecName: "utilities") pod "63dac0d9-904c-449e-a684-77e569e50a4a" (UID: "63dac0d9-904c-449e-a684-77e569e50a4a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.282137 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63dac0d9-904c-449e-a684-77e569e50a4a-kube-api-access-8mqjx" (OuterVolumeSpecName: "kube-api-access-8mqjx") pod "63dac0d9-904c-449e-a684-77e569e50a4a" (UID: "63dac0d9-904c-449e-a684-77e569e50a4a"). InnerVolumeSpecName "kube-api-access-8mqjx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.301289 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63dac0d9-904c-449e-a684-77e569e50a4a" (UID: "63dac0d9-904c-449e-a684-77e569e50a4a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.378197 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mqjx\" (UniqueName: \"kubernetes.io/projected/63dac0d9-904c-449e-a684-77e569e50a4a-kube-api-access-8mqjx\") on node \"crc\" DevicePath \"\"" Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.378248 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.378268 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63dac0d9-904c-449e-a684-77e569e50a4a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.667969 4800 generic.go:334] "Generic (PLEG): container finished" podID="63dac0d9-904c-449e-a684-77e569e50a4a" containerID="0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97" exitCode=0 Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.668046 4800 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.668070 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q6rvl" event={"ID":"63dac0d9-904c-449e-a684-77e569e50a4a","Type":"ContainerDied","Data":"0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97"}
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.668579 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q6rvl" event={"ID":"63dac0d9-904c-449e-a684-77e569e50a4a","Type":"ContainerDied","Data":"e29a52d2e8e1e81a1917184e4d6c9562135b2a5cc56d95f8f59d9b87ef110773"}
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.668634 4800 scope.go:117] "RemoveContainer" containerID="0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.716805 4800 scope.go:117] "RemoveContainer" containerID="441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.727319 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q6rvl"]
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.740985 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-q6rvl"]
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.766939 4800 scope.go:117] "RemoveContainer" containerID="9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.810447 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" path="/var/lib/kubelet/pods/63dac0d9-904c-449e-a684-77e569e50a4a/volumes"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.812648 4800 scope.go:117] "RemoveContainer" containerID="0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97"
Nov 25 18:03:35 crc kubenswrapper[4800]: E1125 18:03:35.813052 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97\": container with ID starting with 0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97 not found: ID does not exist" containerID="0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.813111 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97"} err="failed to get container status \"0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97\": rpc error: code = NotFound desc = could not find container \"0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97\": container with ID starting with 0002d61a3cb49e29384d11cdf0955ca5e58f93f63db7c8e2f8523b5a25b70f97 not found: ID does not exist"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.813156 4800 scope.go:117] "RemoveContainer" containerID="441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d"
Nov 25 18:03:35 crc kubenswrapper[4800]: E1125 18:03:35.813482 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d\": container with ID starting with 441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d not found: ID does not exist" containerID="441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d"
Nov 25 18:03:35 crc kubenswrapper[4800]: E1125 18:03:35.813482 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d\": container with ID starting with 441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d not found: ID does not exist" containerID="441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.813524 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d"} err="failed to get container status \"441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d\": rpc error: code = NotFound desc = could not find container \"441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d\": container with ID starting with 441403bc94154177e6b59b9a35795af3f7f75c8872145841276fbaa434ad670d not found: ID does not exist"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.813549 4800 scope.go:117] "RemoveContainer" containerID="9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8"
Nov 25 18:03:35 crc kubenswrapper[4800]: E1125 18:03:35.813917 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8\": container with ID starting with 9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8 not found: ID does not exist" containerID="9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8"
Nov 25 18:03:35 crc kubenswrapper[4800]: I1125 18:03:35.813964 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8"} err="failed to get container status \"9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8\": rpc error: code = NotFound desc = could not find container \"9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8\": container with ID starting with 9cc9221a164380c043449600587e4b7b7fb5201efd22a89eaf92cf113e27f7a8 not found: ID does not exist"
Nov 25 18:03:42 crc kubenswrapper[4800]: I1125 18:03:42.640645 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:03:42 crc kubenswrapper[4800]: I1125 18:03:42.641422 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.720181 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9m5fb"]
Nov 25 18:03:57 crc kubenswrapper[4800]: E1125 18:03:57.720952 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="extract-utilities"
Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.720963 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="extract-utilities"
Nov 25 18:03:57 crc kubenswrapper[4800]: E1125 18:03:57.720980 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="extract-content"
Nov 25 18:03:57 crc
kubenswrapper[4800]: I1125 18:03:57.720986 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="extract-content" Nov 25 18:03:57 crc kubenswrapper[4800]: E1125 18:03:57.721008 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="registry-server" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.721014 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="registry-server" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.721191 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="63dac0d9-904c-449e-a684-77e569e50a4a" containerName="registry-server" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.722493 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.743549 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9m5fb"] Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.836515 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-catalog-content\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.836590 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-utilities\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.836648 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2rx5\" (UniqueName: \"kubernetes.io/projected/66a7b373-f344-4ea3-a777-7b1d1b2deaad-kube-api-access-f2rx5\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.938751 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-catalog-content\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.938834 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-utilities\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.938919 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2rx5\" (UniqueName: \"kubernetes.io/projected/66a7b373-f344-4ea3-a777-7b1d1b2deaad-kube-api-access-f2rx5\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " 
pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.939378 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-catalog-content\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.939601 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-utilities\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:57 crc kubenswrapper[4800]: I1125 18:03:57.966947 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2rx5\" (UniqueName: \"kubernetes.io/projected/66a7b373-f344-4ea3-a777-7b1d1b2deaad-kube-api-access-f2rx5\") pod \"certified-operators-9m5fb\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:58 crc kubenswrapper[4800]: I1125 18:03:58.044565 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:03:58 crc kubenswrapper[4800]: I1125 18:03:58.631553 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9m5fb"] Nov 25 18:03:58 crc kubenswrapper[4800]: I1125 18:03:58.936067 4800 generic.go:334] "Generic (PLEG): container finished" podID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerID="fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6" exitCode=0 Nov 25 18:03:58 crc kubenswrapper[4800]: I1125 18:03:58.936112 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9m5fb" event={"ID":"66a7b373-f344-4ea3-a777-7b1d1b2deaad","Type":"ContainerDied","Data":"fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6"} Nov 25 18:03:58 crc kubenswrapper[4800]: I1125 18:03:58.936140 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9m5fb" event={"ID":"66a7b373-f344-4ea3-a777-7b1d1b2deaad","Type":"ContainerStarted","Data":"ad57f9126cf94183367f46e3551b999e57e88768fdbf7d1635155e18587e28ad"} Nov 25 18:04:04 crc kubenswrapper[4800]: I1125 18:04:04.001400 4800 generic.go:334] "Generic (PLEG): container finished" podID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerID="501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9" exitCode=0 Nov 25 18:04:04 crc kubenswrapper[4800]: I1125 18:04:04.001467 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9m5fb" event={"ID":"66a7b373-f344-4ea3-a777-7b1d1b2deaad","Type":"ContainerDied","Data":"501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9"} Nov 25 18:04:05 crc kubenswrapper[4800]: I1125 18:04:05.016372 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9m5fb" event={"ID":"66a7b373-f344-4ea3-a777-7b1d1b2deaad","Type":"ContainerStarted","Data":"540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1"} Nov 25 18:04:08 crc kubenswrapper[4800]: I1125 18:04:08.045114 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:04:08 crc kubenswrapper[4800]: I1125 18:04:08.046409 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:04:08 crc kubenswrapper[4800]: I1125 18:04:08.129027 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:04:08 crc kubenswrapper[4800]: I1125 18:04:08.161383 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9m5fb" podStartSLOduration=5.66770921 podStartE2EDuration="11.161363124s" podCreationTimestamp="2025-11-25 18:03:57 +0000 UTC" firstStartedPulling="2025-11-25 18:03:58.940512482 +0000 UTC m=+9999.994920964" lastFinishedPulling="2025-11-25 18:04:04.434166356 +0000 UTC m=+10005.488574878" observedRunningTime="2025-11-25 18:04:05.043343081 +0000 UTC m=+10006.097751653" watchObservedRunningTime="2025-11-25 18:04:08.161363124 +0000 UTC m=+10009.215771616" Nov 25 18:04:09 crc kubenswrapper[4800]: I1125 18:04:09.623463 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:04:09 crc kubenswrapper[4800]: I1125 18:04:09.712550 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9m5fb"] Nov 25 18:04:09 crc kubenswrapper[4800]: I1125 18:04:09.760745 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7hdqn"] Nov 25 18:04:09 crc kubenswrapper[4800]: I1125 18:04:09.761123 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7hdqn" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="registry-server" containerID="cri-o://71444e1ec03530124e6eaaeeb5143a7c41d9a25ce415e8984ac6d334319d98de" gracePeriod=2 Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.088335 4800 generic.go:334] "Generic (PLEG): container finished" podID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerID="71444e1ec03530124e6eaaeeb5143a7c41d9a25ce415e8984ac6d334319d98de" exitCode=0 Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.088553 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hdqn" event={"ID":"3a67a8f3-a3e6-4d62-a901-ca2427e73f08","Type":"ContainerDied","Data":"71444e1ec03530124e6eaaeeb5143a7c41d9a25ce415e8984ac6d334319d98de"} Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.283798 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.390117 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2422\" (UniqueName: \"kubernetes.io/projected/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-kube-api-access-j2422\") pod \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.390217 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-catalog-content\") pod \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.390295 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-utilities\") pod \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\" (UID: \"3a67a8f3-a3e6-4d62-a901-ca2427e73f08\") " Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.393381 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-utilities" (OuterVolumeSpecName: "utilities") pod "3a67a8f3-a3e6-4d62-a901-ca2427e73f08" (UID: "3a67a8f3-a3e6-4d62-a901-ca2427e73f08"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.407495 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-kube-api-access-j2422" (OuterVolumeSpecName: "kube-api-access-j2422") pod "3a67a8f3-a3e6-4d62-a901-ca2427e73f08" (UID: "3a67a8f3-a3e6-4d62-a901-ca2427e73f08"). InnerVolumeSpecName "kube-api-access-j2422". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.478501 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a67a8f3-a3e6-4d62-a901-ca2427e73f08" (UID: "3a67a8f3-a3e6-4d62-a901-ca2427e73f08"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.492947 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2422\" (UniqueName: \"kubernetes.io/projected/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-kube-api-access-j2422\") on node \"crc\" DevicePath \"\"" Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.492992 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:04:10 crc kubenswrapper[4800]: I1125 18:04:10.493005 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a67a8f3-a3e6-4d62-a901-ca2427e73f08-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.113470 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hdqn" event={"ID":"3a67a8f3-a3e6-4d62-a901-ca2427e73f08","Type":"ContainerDied","Data":"eb3faab092f5a846e0ec645c5d126c4d518461e19413e175394dd479cf978080"} Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.113756 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7hdqn" Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.113811 4800 scope.go:117] "RemoveContainer" containerID="71444e1ec03530124e6eaaeeb5143a7c41d9a25ce415e8984ac6d334319d98de" Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.155883 4800 scope.go:117] "RemoveContainer" containerID="59206745335abbc14cea306fdf4e4de6834a435aefbc4ab3f0a4dd21fa896cc1" Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.156965 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7hdqn"] Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.165306 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7hdqn"] Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.395652 4800 scope.go:117] "RemoveContainer" containerID="cde8fad6a1ff14ca0c497a36bc26fd5aeab6fd1a393d737e10701765a7914313" Nov 25 18:04:11 crc kubenswrapper[4800]: I1125 18:04:11.797086 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" path="/var/lib/kubelet/pods/3a67a8f3-a3e6-4d62-a901-ca2427e73f08/volumes" Nov 25 18:04:12 crc kubenswrapper[4800]: I1125 18:04:12.639809 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:04:12 crc kubenswrapper[4800]: I1125 18:04:12.640323 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:04:42 crc kubenswrapper[4800]: I1125 18:04:42.640369 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:04:42 crc kubenswrapper[4800]: I1125 18:04:42.641030 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:04:42 crc kubenswrapper[4800]: I1125 18:04:42.641082 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 18:04:42 crc kubenswrapper[4800]: I1125 18:04:42.641710 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"44642c89cc2e6e77c59027ec6b690d8a73cbb0fdae2856b19eba18a6aaa5941f"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 18:04:42 crc kubenswrapper[4800]: I1125 18:04:42.641776 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://44642c89cc2e6e77c59027ec6b690d8a73cbb0fdae2856b19eba18a6aaa5941f" gracePeriod=600
Nov 25 18:04:43 crc kubenswrapper[4800]: I1125 18:04:43.469825 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="44642c89cc2e6e77c59027ec6b690d8a73cbb0fdae2856b19eba18a6aaa5941f" exitCode=0
Nov 25 18:04:43 crc kubenswrapper[4800]: I1125 18:04:43.469873 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"44642c89cc2e6e77c59027ec6b690d8a73cbb0fdae2856b19eba18a6aaa5941f"}
Nov 25 18:04:43 crc kubenswrapper[4800]: I1125 18:04:43.470253 4800 scope.go:117] "RemoveContainer" containerID="24ffe982850af646d6caea80212187ab1fe6ce7f536c8850c8571d2e95691308"
Nov 25 18:04:44 crc kubenswrapper[4800]: I1125 18:04:44.482770 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf"}
Nov 25 18:07:12 crc kubenswrapper[4800]: I1125 18:07:12.639834 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:07:12 crc kubenswrapper[4800]: I1125 18:07:12.640887 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
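
[Editor's note: the sequence above is the standard liveness-probe failure path: the HTTP GET to 127.0.0.1:8798/health is refused, the probe is marked unhealthy, the container is killed with the pod's termination grace period (gracePeriod=600 here), and a replacement is started. When the replacement keeps failing, the CrashLoopBackOff records below show the restart delay capped at 5m0s. A sketch of both behaviors, assuming the kubelet's documented defaults of a 10s initial backoff doubling to a 5-minute cap; the helper names are hypothetical:]

    import urllib.request
    import urllib.error

    def http_probe_alive(url: str = "http://127.0.0.1:8798/health",
                         timeout_s: float = 1.0) -> bool:
        """Mimic an HTTP liveness probe: any 2xx/3xx reply within the timeout passes."""
        try:
            with urllib.request.urlopen(url, timeout=timeout_s) as resp:
                return 200 <= resp.status < 400
        except (urllib.error.URLError, OSError):
            # "connect: connection refused" in the log surfaces here as URLError
            return False

    def crashloop_delays(restarts: int, base_s: float = 10.0, cap_s: float = 300.0):
        """Restart delays under CrashLoopBackOff: double from 10s, capped at 5m0s."""
        return [min(base_s * 2 ** i, cap_s) for i in range(restarts)]

    # crashloop_delays(7) -> [10, 20, 40, 80, 160, 300, 300]; the 300s cap
    # is the "back-off 5m0s" repeated in the records that follow.
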
Nov 25 18:07:42 crc kubenswrapper[4800]: I1125 18:07:42.640161 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:07:42 crc kubenswrapper[4800]: I1125 18:07:42.640829 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.640303 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.641090 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.641186 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z"
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.642414 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.642484 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" gracePeriod=600
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.801468 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" exitCode=0
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.801550 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf"}
Nov 25 18:08:12 crc kubenswrapper[4800]: I1125 18:08:12.801617 4800 scope.go:117] "RemoveContainer" containerID="44642c89cc2e6e77c59027ec6b690d8a73cbb0fdae2856b19eba18a6aaa5941f"
Nov 25 18:08:13 crc kubenswrapper[4800]: E1125 18:08:13.502992 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\""
pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:08:13 crc kubenswrapper[4800]: I1125 18:08:13.816210 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:08:13 crc kubenswrapper[4800]: E1125 18:08:13.817139 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:08:28 crc kubenswrapper[4800]: I1125 18:08:28.785779 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:08:28 crc kubenswrapper[4800]: E1125 18:08:28.786677 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:08:42 crc kubenswrapper[4800]: I1125 18:08:42.785291 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:08:42 crc kubenswrapper[4800]: E1125 18:08:42.786705 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:08:56 crc kubenswrapper[4800]: I1125 18:08:56.785225 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:08:56 crc kubenswrapper[4800]: E1125 18:08:56.786085 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:09:09 crc kubenswrapper[4800]: I1125 18:09:09.794398 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:09:09 crc kubenswrapper[4800]: E1125 18:09:09.795127 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:09:23 crc kubenswrapper[4800]: I1125 18:09:23.785492 4800 
scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:09:23 crc kubenswrapper[4800]: E1125 18:09:23.786356 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:09:36 crc kubenswrapper[4800]: I1125 18:09:36.785211 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:09:36 crc kubenswrapper[4800]: E1125 18:09:36.786001 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:09:48 crc kubenswrapper[4800]: I1125 18:09:48.786264 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:09:48 crc kubenswrapper[4800]: E1125 18:09:48.786956 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:10:01 crc kubenswrapper[4800]: I1125 18:10:01.786150 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:10:01 crc kubenswrapper[4800]: E1125 18:10:01.789540 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:10:12 crc kubenswrapper[4800]: I1125 18:10:12.785509 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:10:12 crc kubenswrapper[4800]: E1125 18:10:12.786617 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:10:26 crc kubenswrapper[4800]: I1125 18:10:26.786283 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:10:26 crc kubenswrapper[4800]: E1125 18:10:26.787547 4800 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:10:37 crc kubenswrapper[4800]: I1125 18:10:37.787220 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:10:37 crc kubenswrapper[4800]: E1125 18:10:37.788257 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:10:49 crc kubenswrapper[4800]: I1125 18:10:49.785973 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:10:49 crc kubenswrapper[4800]: E1125 18:10:49.787011 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:11:03 crc kubenswrapper[4800]: I1125 18:11:03.786262 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:11:03 crc kubenswrapper[4800]: E1125 18:11:03.787457 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:11:17 crc kubenswrapper[4800]: I1125 18:11:17.785198 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:11:17 crc kubenswrapper[4800]: E1125 18:11:17.786026 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.360758 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b9sm6"] Nov 25 18:11:18 crc kubenswrapper[4800]: E1125 18:11:18.361597 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="extract-utilities" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.361623 4800 
state_mem.go:107] "Deleted CPUSet assignment" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="extract-utilities" Nov 25 18:11:18 crc kubenswrapper[4800]: E1125 18:11:18.361649 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="registry-server" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.361656 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="registry-server" Nov 25 18:11:18 crc kubenswrapper[4800]: E1125 18:11:18.361697 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="extract-content" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.361703 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="extract-content" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.361942 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a67a8f3-a3e6-4d62-a901-ca2427e73f08" containerName="registry-server" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.363351 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.382969 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdg5q\" (UniqueName: \"kubernetes.io/projected/dbe909a3-b38b-41cf-8c35-403a2ab916f4-kube-api-access-jdg5q\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.383097 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-utilities\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.383174 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-catalog-content\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.401112 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b9sm6"] Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.485209 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdg5q\" (UniqueName: \"kubernetes.io/projected/dbe909a3-b38b-41cf-8c35-403a2ab916f4-kube-api-access-jdg5q\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.485357 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-utilities\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 
18:11:18.485876 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-utilities\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.486027 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-catalog-content\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.486287 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-catalog-content\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.505800 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdg5q\" (UniqueName: \"kubernetes.io/projected/dbe909a3-b38b-41cf-8c35-403a2ab916f4-kube-api-access-jdg5q\") pod \"redhat-operators-b9sm6\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:18 crc kubenswrapper[4800]: I1125 18:11:18.690772 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:19 crc kubenswrapper[4800]: I1125 18:11:19.174968 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b9sm6"] Nov 25 18:11:19 crc kubenswrapper[4800]: I1125 18:11:19.889172 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9sm6" event={"ID":"dbe909a3-b38b-41cf-8c35-403a2ab916f4","Type":"ContainerStarted","Data":"a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e"} Nov 25 18:11:19 crc kubenswrapper[4800]: I1125 18:11:19.889500 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9sm6" event={"ID":"dbe909a3-b38b-41cf-8c35-403a2ab916f4","Type":"ContainerStarted","Data":"c275daabab7a58082185b9637d102473c8d58d3145be35e2cc3061cc17c6ac58"} Nov 25 18:11:20 crc kubenswrapper[4800]: I1125 18:11:20.901519 4800 generic.go:334] "Generic (PLEG): container finished" podID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerID="a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e" exitCode=0 Nov 25 18:11:20 crc kubenswrapper[4800]: I1125 18:11:20.901573 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9sm6" event={"ID":"dbe909a3-b38b-41cf-8c35-403a2ab916f4","Type":"ContainerDied","Data":"a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e"} Nov 25 18:11:20 crc kubenswrapper[4800]: I1125 18:11:20.905413 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:11:22 crc kubenswrapper[4800]: I1125 18:11:22.926782 4800 generic.go:334] "Generic (PLEG): container finished" podID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerID="bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8" exitCode=0 Nov 25 18:11:22 crc kubenswrapper[4800]: I1125 18:11:22.926890 
4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9sm6" event={"ID":"dbe909a3-b38b-41cf-8c35-403a2ab916f4","Type":"ContainerDied","Data":"bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8"} Nov 25 18:11:23 crc kubenswrapper[4800]: I1125 18:11:23.938594 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9sm6" event={"ID":"dbe909a3-b38b-41cf-8c35-403a2ab916f4","Type":"ContainerStarted","Data":"8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140"} Nov 25 18:11:23 crc kubenswrapper[4800]: I1125 18:11:23.961770 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b9sm6" podStartSLOduration=3.526932115 podStartE2EDuration="5.96174232s" podCreationTimestamp="2025-11-25 18:11:18 +0000 UTC" firstStartedPulling="2025-11-25 18:11:20.905059005 +0000 UTC m=+10441.959467497" lastFinishedPulling="2025-11-25 18:11:23.33986923 +0000 UTC m=+10444.394277702" observedRunningTime="2025-11-25 18:11:23.954793322 +0000 UTC m=+10445.009201814" watchObservedRunningTime="2025-11-25 18:11:23.96174232 +0000 UTC m=+10445.016150812" Nov 25 18:11:28 crc kubenswrapper[4800]: I1125 18:11:28.691404 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:28 crc kubenswrapper[4800]: I1125 18:11:28.693003 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:29 crc kubenswrapper[4800]: I1125 18:11:29.734355 4800 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b9sm6" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="registry-server" probeResult="failure" output=< Nov 25 18:11:29 crc kubenswrapper[4800]: timeout: failed to connect service ":50051" within 1s Nov 25 18:11:29 crc kubenswrapper[4800]: > Nov 25 18:11:31 crc kubenswrapper[4800]: I1125 18:11:31.786566 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:11:31 crc kubenswrapper[4800]: E1125 18:11:31.787249 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:11:38 crc kubenswrapper[4800]: I1125 18:11:38.757543 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:38 crc kubenswrapper[4800]: I1125 18:11:38.818317 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.129437 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n4kht"] Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.132755 4800 util.go:30] "No sandbox for pod can be found. 
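
[Editor's note: the "Observed pod startup duration" record above can be checked by hand: podStartSLOduration is the end-to-end startup time minus the image-pull window. A worked check using the record's own numbers (this is arithmetic over logged values, not kubelet code; the m=+... figures are the kubelet's monotonic-clock offsets, which reproduce the logged result exactly):]

    # Values copied from the redhat-operators-b9sm6 startup-latency record above.
    first_started_pulling = 10441.959467497   # m=+ offset of firstStartedPulling
    last_finished_pulling = 10444.394277702   # m=+ offset of lastFinishedPulling
    pod_start_e2e         = 5.96174232        # podStartE2EDuration, in seconds

    pull_window = last_finished_pulling - first_started_pulling   # 2.434810205 s
    slo = pod_start_e2e - pull_window

    print(f"podStartSLOduration={slo:.9f}")   # 3.526932115, matching the log

[The startup-probe failure that follows ("timeout: failed to connect service \":50051\" within 1s") is the registry-server's gRPC port check timing out once before the catalog finishes loading; the probe transitions to status="started" nine seconds later.]
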
Need to start a new one" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.159471 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n4kht"] Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.265008 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-utilities\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.265446 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-catalog-content\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.265573 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzpjh\" (UniqueName: \"kubernetes.io/projected/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-kube-api-access-kzpjh\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.367571 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-catalog-content\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.367626 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzpjh\" (UniqueName: \"kubernetes.io/projected/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-kube-api-access-kzpjh\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.367745 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-utilities\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.368144 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-catalog-content\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.368190 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-utilities\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.387627 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kzpjh\" (UniqueName: \"kubernetes.io/projected/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-kube-api-access-kzpjh\") pod \"community-operators-n4kht\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:40 crc kubenswrapper[4800]: I1125 18:11:40.460178 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:41 crc kubenswrapper[4800]: I1125 18:11:41.070123 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n4kht"] Nov 25 18:11:41 crc kubenswrapper[4800]: W1125 18:11:41.072816 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40da1b5f_71fd_4d13_b9c2_9175ac3a95ae.slice/crio-8b43b157b8502cdfe930fea96788d6b1469b9643569ac903262811fac8e23cb1 WatchSource:0}: Error finding container 8b43b157b8502cdfe930fea96788d6b1469b9643569ac903262811fac8e23cb1: Status 404 returned error can't find the container with id 8b43b157b8502cdfe930fea96788d6b1469b9643569ac903262811fac8e23cb1 Nov 25 18:11:41 crc kubenswrapper[4800]: I1125 18:11:41.139716 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4kht" event={"ID":"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae","Type":"ContainerStarted","Data":"8b43b157b8502cdfe930fea96788d6b1469b9643569ac903262811fac8e23cb1"} Nov 25 18:11:42 crc kubenswrapper[4800]: I1125 18:11:42.149919 4800 generic.go:334] "Generic (PLEG): container finished" podID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerID="ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5" exitCode=0 Nov 25 18:11:42 crc kubenswrapper[4800]: I1125 18:11:42.150032 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4kht" event={"ID":"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae","Type":"ContainerDied","Data":"ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5"} Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.099977 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b9sm6"] Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.100590 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b9sm6" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="registry-server" containerID="cri-o://8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140" gracePeriod=2 Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.158764 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4kht" event={"ID":"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae","Type":"ContainerStarted","Data":"56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1"} Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.580760 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.742008 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-utilities\") pod \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.742284 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-catalog-content\") pod \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.742637 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdg5q\" (UniqueName: \"kubernetes.io/projected/dbe909a3-b38b-41cf-8c35-403a2ab916f4-kube-api-access-jdg5q\") pod \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\" (UID: \"dbe909a3-b38b-41cf-8c35-403a2ab916f4\") " Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.743020 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-utilities" (OuterVolumeSpecName: "utilities") pod "dbe909a3-b38b-41cf-8c35-403a2ab916f4" (UID: "dbe909a3-b38b-41cf-8c35-403a2ab916f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.743643 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.755032 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe909a3-b38b-41cf-8c35-403a2ab916f4-kube-api-access-jdg5q" (OuterVolumeSpecName: "kube-api-access-jdg5q") pod "dbe909a3-b38b-41cf-8c35-403a2ab916f4" (UID: "dbe909a3-b38b-41cf-8c35-403a2ab916f4"). InnerVolumeSpecName "kube-api-access-jdg5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.832699 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dbe909a3-b38b-41cf-8c35-403a2ab916f4" (UID: "dbe909a3-b38b-41cf-8c35-403a2ab916f4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.846629 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe909a3-b38b-41cf-8c35-403a2ab916f4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:11:43 crc kubenswrapper[4800]: I1125 18:11:43.846684 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdg5q\" (UniqueName: \"kubernetes.io/projected/dbe909a3-b38b-41cf-8c35-403a2ab916f4-kube-api-access-jdg5q\") on node \"crc\" DevicePath \"\"" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.171606 4800 generic.go:334] "Generic (PLEG): container finished" podID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerID="8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140" exitCode=0 Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.171663 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9sm6" event={"ID":"dbe909a3-b38b-41cf-8c35-403a2ab916f4","Type":"ContainerDied","Data":"8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140"} Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.171698 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9sm6" event={"ID":"dbe909a3-b38b-41cf-8c35-403a2ab916f4","Type":"ContainerDied","Data":"c275daabab7a58082185b9637d102473c8d58d3145be35e2cc3061cc17c6ac58"} Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.171715 4800 scope.go:117] "RemoveContainer" containerID="8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.171788 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b9sm6" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.173964 4800 generic.go:334] "Generic (PLEG): container finished" podID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerID="56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1" exitCode=0 Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.173988 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4kht" event={"ID":"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae","Type":"ContainerDied","Data":"56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1"} Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.201281 4800 scope.go:117] "RemoveContainer" containerID="bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.224770 4800 scope.go:117] "RemoveContainer" containerID="a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.237305 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b9sm6"] Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.248141 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b9sm6"] Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.269430 4800 scope.go:117] "RemoveContainer" containerID="8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140" Nov 25 18:11:44 crc kubenswrapper[4800]: E1125 18:11:44.269914 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140\": container with ID starting with 8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140 not found: ID does not exist" containerID="8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.269972 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140"} err="failed to get container status \"8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140\": rpc error: code = NotFound desc = could not find container \"8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140\": container with ID starting with 8d9bebc863192474e2c52b51aaecc9b4f09ad691fa656f0916219d5c0e883140 not found: ID does not exist" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.270008 4800 scope.go:117] "RemoveContainer" containerID="bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8" Nov 25 18:11:44 crc kubenswrapper[4800]: E1125 18:11:44.270360 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8\": container with ID starting with bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8 not found: ID does not exist" containerID="bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.270388 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8"} err="failed to get container status 
\"bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8\": rpc error: code = NotFound desc = could not find container \"bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8\": container with ID starting with bb78edce0d50527fd6b0e2606e802acf30d1206f4836187aa76cdc23983a6fb8 not found: ID does not exist" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.270405 4800 scope.go:117] "RemoveContainer" containerID="a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e" Nov 25 18:11:44 crc kubenswrapper[4800]: E1125 18:11:44.270624 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e\": container with ID starting with a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e not found: ID does not exist" containerID="a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.270660 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e"} err="failed to get container status \"a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e\": rpc error: code = NotFound desc = could not find container \"a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e\": container with ID starting with a8034bfe71d1938f73724376df8e55bffa6e35039e7e63102e810973c0ccbc6e not found: ID does not exist" Nov 25 18:11:44 crc kubenswrapper[4800]: I1125 18:11:44.785709 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:11:44 crc kubenswrapper[4800]: E1125 18:11:44.786263 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:11:45 crc kubenswrapper[4800]: I1125 18:11:45.186502 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4kht" event={"ID":"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae","Type":"ContainerStarted","Data":"8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae"} Nov 25 18:11:45 crc kubenswrapper[4800]: I1125 18:11:45.209836 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n4kht" podStartSLOduration=2.538639717 podStartE2EDuration="5.209806632s" podCreationTimestamp="2025-11-25 18:11:40 +0000 UTC" firstStartedPulling="2025-11-25 18:11:42.153014193 +0000 UTC m=+10463.207422685" lastFinishedPulling="2025-11-25 18:11:44.824181118 +0000 UTC m=+10465.878589600" observedRunningTime="2025-11-25 18:11:45.201880746 +0000 UTC m=+10466.256289248" watchObservedRunningTime="2025-11-25 18:11:45.209806632 +0000 UTC m=+10466.264215114" Nov 25 18:11:45 crc kubenswrapper[4800]: I1125 18:11:45.814629 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" path="/var/lib/kubelet/pods/dbe909a3-b38b-41cf-8c35-403a2ab916f4/volumes" Nov 25 18:11:50 crc kubenswrapper[4800]: I1125 18:11:50.460666 4800 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:50 crc kubenswrapper[4800]: I1125 18:11:50.462035 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:50 crc kubenswrapper[4800]: I1125 18:11:50.515809 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:51 crc kubenswrapper[4800]: I1125 18:11:51.290674 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:51 crc kubenswrapper[4800]: I1125 18:11:51.501204 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n4kht"] Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.262981 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n4kht" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="registry-server" containerID="cri-o://8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae" gracePeriod=2 Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.753825 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.856884 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-catalog-content\") pod \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.857271 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-utilities\") pod \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.857367 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzpjh\" (UniqueName: \"kubernetes.io/projected/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-kube-api-access-kzpjh\") pod \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\" (UID: \"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae\") " Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.858934 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-utilities" (OuterVolumeSpecName: "utilities") pod "40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" (UID: "40da1b5f-71fd-4d13-b9c2-9175ac3a95ae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.865872 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-kube-api-access-kzpjh" (OuterVolumeSpecName: "kube-api-access-kzpjh") pod "40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" (UID: "40da1b5f-71fd-4d13-b9c2-9175ac3a95ae"). InnerVolumeSpecName "kube-api-access-kzpjh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.960094 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:11:53 crc kubenswrapper[4800]: I1125 18:11:53.960492 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzpjh\" (UniqueName: \"kubernetes.io/projected/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-kube-api-access-kzpjh\") on node \"crc\" DevicePath \"\"" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.244389 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" (UID: "40da1b5f-71fd-4d13-b9c2-9175ac3a95ae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.266341 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.276089 4800 generic.go:334] "Generic (PLEG): container finished" podID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerID="8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae" exitCode=0 Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.276142 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4kht" event={"ID":"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae","Type":"ContainerDied","Data":"8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae"} Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.276178 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4kht" event={"ID":"40da1b5f-71fd-4d13-b9c2-9175ac3a95ae","Type":"ContainerDied","Data":"8b43b157b8502cdfe930fea96788d6b1469b9643569ac903262811fac8e23cb1"} Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.276182 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n4kht" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.276204 4800 scope.go:117] "RemoveContainer" containerID="8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.319524 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n4kht"] Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.323035 4800 scope.go:117] "RemoveContainer" containerID="56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.328361 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n4kht"] Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.347919 4800 scope.go:117] "RemoveContainer" containerID="ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.405509 4800 scope.go:117] "RemoveContainer" containerID="8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae" Nov 25 18:11:54 crc kubenswrapper[4800]: E1125 18:11:54.406077 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae\": container with ID starting with 8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae not found: ID does not exist" containerID="8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.406129 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae"} err="failed to get container status \"8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae\": rpc error: code = NotFound desc = could not find container \"8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae\": container with ID starting with 8251b3b383f3bbe4f98b60f3776e72a7c3ceb2f03863eb7c18b50f2019a5cdae not found: ID does not exist" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.406161 4800 scope.go:117] "RemoveContainer" containerID="56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1" Nov 25 18:11:54 crc kubenswrapper[4800]: E1125 18:11:54.406538 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1\": container with ID starting with 56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1 not found: ID does not exist" containerID="56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.406584 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1"} err="failed to get container status \"56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1\": rpc error: code = NotFound desc = could not find container \"56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1\": container with ID starting with 56e878a2b34e9d812d550a923bdc2e460d0ac8ad8f3d2e8d1978f795d29b86f1 not found: ID does not exist" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.406611 4800 scope.go:117] "RemoveContainer" 
containerID="ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5" Nov 25 18:11:54 crc kubenswrapper[4800]: E1125 18:11:54.407334 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5\": container with ID starting with ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5 not found: ID does not exist" containerID="ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5" Nov 25 18:11:54 crc kubenswrapper[4800]: I1125 18:11:54.407372 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5"} err="failed to get container status \"ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5\": rpc error: code = NotFound desc = could not find container \"ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5\": container with ID starting with ee8719223b8509503c5d78bbe50609fbb732e7e1a8b01236f54e0261b4f83bb5 not found: ID does not exist" Nov 25 18:11:55 crc kubenswrapper[4800]: I1125 18:11:55.799768 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" path="/var/lib/kubelet/pods/40da1b5f-71fd-4d13-b9c2-9175ac3a95ae/volumes" Nov 25 18:11:56 crc kubenswrapper[4800]: I1125 18:11:56.786137 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:11:56 crc kubenswrapper[4800]: E1125 18:11:56.786691 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:12:09 crc kubenswrapper[4800]: I1125 18:12:09.801387 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:12:09 crc kubenswrapper[4800]: E1125 18:12:09.802222 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:12:23 crc kubenswrapper[4800]: I1125 18:12:23.786279 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:12:23 crc kubenswrapper[4800]: E1125 18:12:23.787251 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:12:34 crc kubenswrapper[4800]: I1125 18:12:34.785812 4800 scope.go:117] "RemoveContainer" 
containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:12:34 crc kubenswrapper[4800]: E1125 18:12:34.786678 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:12:46 crc kubenswrapper[4800]: I1125 18:12:46.785882 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:12:46 crc kubenswrapper[4800]: E1125 18:12:46.786588 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:13:01 crc kubenswrapper[4800]: I1125 18:13:01.787188 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:13:01 crc kubenswrapper[4800]: E1125 18:13:01.788192 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:13:16 crc kubenswrapper[4800]: I1125 18:13:16.797297 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:13:18 crc kubenswrapper[4800]: I1125 18:13:18.118067 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"a8c4ea772f008841729175b31f60a576a8aa05aa7616cac9253311e58f50fab7"} Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.259683 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8ctvq/must-gather-t5sw5"] Nov 25 18:13:40 crc kubenswrapper[4800]: E1125 18:13:40.261346 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="registry-server" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261367 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="registry-server" Nov 25 18:13:40 crc kubenswrapper[4800]: E1125 18:13:40.261412 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="registry-server" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261420 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="registry-server" Nov 25 18:13:40 crc kubenswrapper[4800]: E1125 18:13:40.261443 4800 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="extract-content" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261450 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="extract-content" Nov 25 18:13:40 crc kubenswrapper[4800]: E1125 18:13:40.261465 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="extract-utilities" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261474 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="extract-utilities" Nov 25 18:13:40 crc kubenswrapper[4800]: E1125 18:13:40.261489 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="extract-content" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261496 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="extract-content" Nov 25 18:13:40 crc kubenswrapper[4800]: E1125 18:13:40.261517 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="extract-utilities" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261524 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="extract-utilities" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261776 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe909a3-b38b-41cf-8c35-403a2ab916f4" containerName="registry-server" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.261807 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="40da1b5f-71fd-4d13-b9c2-9175ac3a95ae" containerName="registry-server" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.263484 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.265263 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8ctvq"/"openshift-service-ca.crt" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.265730 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-8ctvq"/"default-dockercfg-nnxm9" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.267428 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8ctvq"/"kube-root-ca.crt" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.268343 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8ctvq/must-gather-t5sw5"] Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.453925 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/321aa8fa-429b-4e5c-af0c-6ebb42916be3-must-gather-output\") pod \"must-gather-t5sw5\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.454314 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhwbx\" (UniqueName: \"kubernetes.io/projected/321aa8fa-429b-4e5c-af0c-6ebb42916be3-kube-api-access-nhwbx\") pod \"must-gather-t5sw5\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.556408 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/321aa8fa-429b-4e5c-af0c-6ebb42916be3-must-gather-output\") pod \"must-gather-t5sw5\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.556726 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhwbx\" (UniqueName: \"kubernetes.io/projected/321aa8fa-429b-4e5c-af0c-6ebb42916be3-kube-api-access-nhwbx\") pod \"must-gather-t5sw5\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.557283 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/321aa8fa-429b-4e5c-af0c-6ebb42916be3-must-gather-output\") pod \"must-gather-t5sw5\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.580515 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhwbx\" (UniqueName: \"kubernetes.io/projected/321aa8fa-429b-4e5c-af0c-6ebb42916be3-kube-api-access-nhwbx\") pod \"must-gather-t5sw5\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:40 crc kubenswrapper[4800]: I1125 18:13:40.582441 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:13:41 crc kubenswrapper[4800]: I1125 18:13:41.049944 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8ctvq/must-gather-t5sw5"] Nov 25 18:13:41 crc kubenswrapper[4800]: I1125 18:13:41.428271 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" event={"ID":"321aa8fa-429b-4e5c-af0c-6ebb42916be3","Type":"ContainerStarted","Data":"e45a0775c102d89eb9551bf30f140436548c74b135297d673b9a3301e8f0b518"} Nov 25 18:13:45 crc kubenswrapper[4800]: I1125 18:13:45.468789 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" event={"ID":"321aa8fa-429b-4e5c-af0c-6ebb42916be3","Type":"ContainerStarted","Data":"fd9a565ff9720ad05ed5fda5aa4ed54194dac93e8ebce2dd3c187883c07d088d"} Nov 25 18:13:45 crc kubenswrapper[4800]: I1125 18:13:45.469261 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" event={"ID":"321aa8fa-429b-4e5c-af0c-6ebb42916be3","Type":"ContainerStarted","Data":"193a4a5469689a0ffd0b295abcba6d14562850a973e6853627b2ab44e2d66a55"} Nov 25 18:13:45 crc kubenswrapper[4800]: I1125 18:13:45.484449 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" podStartSLOduration=1.729160286 podStartE2EDuration="5.484433552s" podCreationTimestamp="2025-11-25 18:13:40 +0000 UTC" firstStartedPulling="2025-11-25 18:13:41.055379852 +0000 UTC m=+10582.109788344" lastFinishedPulling="2025-11-25 18:13:44.810653128 +0000 UTC m=+10585.865061610" observedRunningTime="2025-11-25 18:13:45.483473256 +0000 UTC m=+10586.537881728" watchObservedRunningTime="2025-11-25 18:13:45.484433552 +0000 UTC m=+10586.538842024" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.331044 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-47rv4"] Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.333135 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.364008 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gg5hg\" (UniqueName: \"kubernetes.io/projected/24d44b3b-bf13-43dd-93e7-e7c91f86b531-kube-api-access-gg5hg\") pod \"crc-debug-47rv4\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.364109 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24d44b3b-bf13-43dd-93e7-e7c91f86b531-host\") pod \"crc-debug-47rv4\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.466153 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24d44b3b-bf13-43dd-93e7-e7c91f86b531-host\") pod \"crc-debug-47rv4\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.466394 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24d44b3b-bf13-43dd-93e7-e7c91f86b531-host\") pod \"crc-debug-47rv4\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.466725 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gg5hg\" (UniqueName: \"kubernetes.io/projected/24d44b3b-bf13-43dd-93e7-e7c91f86b531-kube-api-access-gg5hg\") pod \"crc-debug-47rv4\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.492982 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gg5hg\" (UniqueName: \"kubernetes.io/projected/24d44b3b-bf13-43dd-93e7-e7c91f86b531-kube-api-access-gg5hg\") pod \"crc-debug-47rv4\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:50 crc kubenswrapper[4800]: I1125 18:13:50.652887 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:13:51 crc kubenswrapper[4800]: I1125 18:13:51.523738 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" event={"ID":"24d44b3b-bf13-43dd-93e7-e7c91f86b531","Type":"ContainerStarted","Data":"18472ea6f3432b59455d92eadec7b12f06c4b2310ddd3d249f4cd5c666c156fc"} Nov 25 18:14:04 crc kubenswrapper[4800]: I1125 18:14:04.637778 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" event={"ID":"24d44b3b-bf13-43dd-93e7-e7c91f86b531","Type":"ContainerStarted","Data":"ad9223e8f87496aa2373267b7e782ccb13386ad573d64ddfe3f8a07c810447fd"} Nov 25 18:14:04 crc kubenswrapper[4800]: I1125 18:14:04.653161 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" podStartSLOduration=1.411450927 podStartE2EDuration="14.65314042s" podCreationTimestamp="2025-11-25 18:13:50 +0000 UTC" firstStartedPulling="2025-11-25 18:13:50.698486715 +0000 UTC m=+10591.752895197" lastFinishedPulling="2025-11-25 18:14:03.940176188 +0000 UTC m=+10604.994584690" observedRunningTime="2025-11-25 18:14:04.647869347 +0000 UTC m=+10605.702277829" watchObservedRunningTime="2025-11-25 18:14:04.65314042 +0000 UTC m=+10605.707548912" Nov 25 18:14:53 crc kubenswrapper[4800]: I1125 18:14:53.352813 4800 generic.go:334] "Generic (PLEG): container finished" podID="24d44b3b-bf13-43dd-93e7-e7c91f86b531" containerID="ad9223e8f87496aa2373267b7e782ccb13386ad573d64ddfe3f8a07c810447fd" exitCode=0 Nov 25 18:14:53 crc kubenswrapper[4800]: I1125 18:14:53.352996 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" event={"ID":"24d44b3b-bf13-43dd-93e7-e7c91f86b531","Type":"ContainerDied","Data":"ad9223e8f87496aa2373267b7e782ccb13386ad573d64ddfe3f8a07c810447fd"} Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.487887 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.539544 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-47rv4"] Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.550888 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-47rv4"] Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.582559 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24d44b3b-bf13-43dd-93e7-e7c91f86b531-host\") pod \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.582692 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24d44b3b-bf13-43dd-93e7-e7c91f86b531-host" (OuterVolumeSpecName: "host") pod "24d44b3b-bf13-43dd-93e7-e7c91f86b531" (UID: "24d44b3b-bf13-43dd-93e7-e7c91f86b531"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.582737 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gg5hg\" (UniqueName: \"kubernetes.io/projected/24d44b3b-bf13-43dd-93e7-e7c91f86b531-kube-api-access-gg5hg\") pod \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\" (UID: \"24d44b3b-bf13-43dd-93e7-e7c91f86b531\") " Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.583748 4800 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24d44b3b-bf13-43dd-93e7-e7c91f86b531-host\") on node \"crc\" DevicePath \"\"" Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.592640 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24d44b3b-bf13-43dd-93e7-e7c91f86b531-kube-api-access-gg5hg" (OuterVolumeSpecName: "kube-api-access-gg5hg") pod "24d44b3b-bf13-43dd-93e7-e7c91f86b531" (UID: "24d44b3b-bf13-43dd-93e7-e7c91f86b531"). InnerVolumeSpecName "kube-api-access-gg5hg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:14:54 crc kubenswrapper[4800]: I1125 18:14:54.684676 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gg5hg\" (UniqueName: \"kubernetes.io/projected/24d44b3b-bf13-43dd-93e7-e7c91f86b531-kube-api-access-gg5hg\") on node \"crc\" DevicePath \"\"" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.382455 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18472ea6f3432b59455d92eadec7b12f06c4b2310ddd3d249f4cd5c666c156fc" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.382542 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-47rv4" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.781566 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-zt8r8"] Nov 25 18:14:55 crc kubenswrapper[4800]: E1125 18:14:55.782567 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24d44b3b-bf13-43dd-93e7-e7c91f86b531" containerName="container-00" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.782591 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="24d44b3b-bf13-43dd-93e7-e7c91f86b531" containerName="container-00" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.783003 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="24d44b3b-bf13-43dd-93e7-e7c91f86b531" containerName="container-00" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.784157 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.826013 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24d44b3b-bf13-43dd-93e7-e7c91f86b531" path="/var/lib/kubelet/pods/24d44b3b-bf13-43dd-93e7-e7c91f86b531/volumes" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.917343 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d0a8edc8-be34-4629-8d11-e4c8d5183243-host\") pod \"crc-debug-zt8r8\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:55 crc kubenswrapper[4800]: I1125 18:14:55.918934 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbh95\" (UniqueName: \"kubernetes.io/projected/d0a8edc8-be34-4629-8d11-e4c8d5183243-kube-api-access-jbh95\") pod \"crc-debug-zt8r8\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:56 crc kubenswrapper[4800]: I1125 18:14:56.021671 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbh95\" (UniqueName: \"kubernetes.io/projected/d0a8edc8-be34-4629-8d11-e4c8d5183243-kube-api-access-jbh95\") pod \"crc-debug-zt8r8\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:56 crc kubenswrapper[4800]: I1125 18:14:56.021982 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d0a8edc8-be34-4629-8d11-e4c8d5183243-host\") pod \"crc-debug-zt8r8\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:56 crc kubenswrapper[4800]: I1125 18:14:56.022150 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d0a8edc8-be34-4629-8d11-e4c8d5183243-host\") pod \"crc-debug-zt8r8\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:56 crc kubenswrapper[4800]: I1125 18:14:56.047124 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbh95\" (UniqueName: \"kubernetes.io/projected/d0a8edc8-be34-4629-8d11-e4c8d5183243-kube-api-access-jbh95\") pod \"crc-debug-zt8r8\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:56 crc kubenswrapper[4800]: I1125 18:14:56.121799 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:56 crc kubenswrapper[4800]: I1125 18:14:56.399170 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" event={"ID":"d0a8edc8-be34-4629-8d11-e4c8d5183243","Type":"ContainerStarted","Data":"5be6401c3d2d5d395a4364cd641315a6607f730884fee1d4ba4584e712a218c8"} Nov 25 18:14:57 crc kubenswrapper[4800]: I1125 18:14:57.413863 4800 generic.go:334] "Generic (PLEG): container finished" podID="d0a8edc8-be34-4629-8d11-e4c8d5183243" containerID="71860c49a3e6c0d5fc60098df5a9e8a00f730608eba102976931b876a6a53a4c" exitCode=0 Nov 25 18:14:57 crc kubenswrapper[4800]: I1125 18:14:57.414136 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" event={"ID":"d0a8edc8-be34-4629-8d11-e4c8d5183243","Type":"ContainerDied","Data":"71860c49a3e6c0d5fc60098df5a9e8a00f730608eba102976931b876a6a53a4c"} Nov 25 18:14:58 crc kubenswrapper[4800]: I1125 18:14:58.791539 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:58 crc kubenswrapper[4800]: I1125 18:14:58.873669 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d0a8edc8-be34-4629-8d11-e4c8d5183243-host\") pod \"d0a8edc8-be34-4629-8d11-e4c8d5183243\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " Nov 25 18:14:58 crc kubenswrapper[4800]: I1125 18:14:58.873831 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d0a8edc8-be34-4629-8d11-e4c8d5183243-host" (OuterVolumeSpecName: "host") pod "d0a8edc8-be34-4629-8d11-e4c8d5183243" (UID: "d0a8edc8-be34-4629-8d11-e4c8d5183243"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:14:58 crc kubenswrapper[4800]: I1125 18:14:58.873857 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbh95\" (UniqueName: \"kubernetes.io/projected/d0a8edc8-be34-4629-8d11-e4c8d5183243-kube-api-access-jbh95\") pod \"d0a8edc8-be34-4629-8d11-e4c8d5183243\" (UID: \"d0a8edc8-be34-4629-8d11-e4c8d5183243\") " Nov 25 18:14:58 crc kubenswrapper[4800]: I1125 18:14:58.874817 4800 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d0a8edc8-be34-4629-8d11-e4c8d5183243-host\") on node \"crc\" DevicePath \"\"" Nov 25 18:14:58 crc kubenswrapper[4800]: I1125 18:14:58.891484 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0a8edc8-be34-4629-8d11-e4c8d5183243-kube-api-access-jbh95" (OuterVolumeSpecName: "kube-api-access-jbh95") pod "d0a8edc8-be34-4629-8d11-e4c8d5183243" (UID: "d0a8edc8-be34-4629-8d11-e4c8d5183243"). InnerVolumeSpecName "kube-api-access-jbh95". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:14:58 crc kubenswrapper[4800]: I1125 18:14:58.976250 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbh95\" (UniqueName: \"kubernetes.io/projected/d0a8edc8-be34-4629-8d11-e4c8d5183243-kube-api-access-jbh95\") on node \"crc\" DevicePath \"\"" Nov 25 18:14:59 crc kubenswrapper[4800]: I1125 18:14:59.432345 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" event={"ID":"d0a8edc8-be34-4629-8d11-e4c8d5183243","Type":"ContainerDied","Data":"5be6401c3d2d5d395a4364cd641315a6607f730884fee1d4ba4584e712a218c8"} Nov 25 18:14:59 crc kubenswrapper[4800]: I1125 18:14:59.432872 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5be6401c3d2d5d395a4364cd641315a6607f730884fee1d4ba4584e712a218c8" Nov 25 18:14:59 crc kubenswrapper[4800]: I1125 18:14:59.432390 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-zt8r8" Nov 25 18:14:59 crc kubenswrapper[4800]: I1125 18:14:59.909310 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-zt8r8"] Nov 25 18:14:59 crc kubenswrapper[4800]: I1125 18:14:59.917885 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-zt8r8"] Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.170121 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc"] Nov 25 18:15:00 crc kubenswrapper[4800]: E1125 18:15:00.170622 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a8edc8-be34-4629-8d11-e4c8d5183243" containerName="container-00" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.170649 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a8edc8-be34-4629-8d11-e4c8d5183243" containerName="container-00" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.170952 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0a8edc8-be34-4629-8d11-e4c8d5183243" containerName="container-00" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.171787 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.179050 4800 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.179245 4800 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.210451 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc"] Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.305025 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26b822fa-6862-49f0-8faa-d58e9f258f26-config-volume\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.305715 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26b822fa-6862-49f0-8faa-d58e9f258f26-secret-volume\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.305899 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvm99\" (UniqueName: \"kubernetes.io/projected/26b822fa-6862-49f0-8faa-d58e9f258f26-kube-api-access-tvm99\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.408372 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26b822fa-6862-49f0-8faa-d58e9f258f26-secret-volume\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.408493 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvm99\" (UniqueName: \"kubernetes.io/projected/26b822fa-6862-49f0-8faa-d58e9f258f26-kube-api-access-tvm99\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.408542 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26b822fa-6862-49f0-8faa-d58e9f258f26-config-volume\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.410378 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26b822fa-6862-49f0-8faa-d58e9f258f26-config-volume\") pod 
\"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.868301 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26b822fa-6862-49f0-8faa-d58e9f258f26-secret-volume\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:00 crc kubenswrapper[4800]: I1125 18:15:00.880791 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvm99\" (UniqueName: \"kubernetes.io/projected/26b822fa-6862-49f0-8faa-d58e9f258f26-kube-api-access-tvm99\") pod \"collect-profiles-29401575-w8gnc\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.085444 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-wfdg4"] Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.087288 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.104490 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.239792 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b4989247-b745-4920-8183-da703e82f2cd-host\") pod \"crc-debug-wfdg4\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.240096 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g244g\" (UniqueName: \"kubernetes.io/projected/b4989247-b745-4920-8183-da703e82f2cd-kube-api-access-g244g\") pod \"crc-debug-wfdg4\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.341999 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g244g\" (UniqueName: \"kubernetes.io/projected/b4989247-b745-4920-8183-da703e82f2cd-kube-api-access-g244g\") pod \"crc-debug-wfdg4\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.342495 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b4989247-b745-4920-8183-da703e82f2cd-host\") pod \"crc-debug-wfdg4\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.342597 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b4989247-b745-4920-8183-da703e82f2cd-host\") pod \"crc-debug-wfdg4\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.365960 4800 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g244g\" (UniqueName: \"kubernetes.io/projected/b4989247-b745-4920-8183-da703e82f2cd-kube-api-access-g244g\") pod \"crc-debug-wfdg4\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.423291 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.595576 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc"] Nov 25 18:15:01 crc kubenswrapper[4800]: I1125 18:15:01.821088 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0a8edc8-be34-4629-8d11-e4c8d5183243" path="/var/lib/kubelet/pods/d0a8edc8-be34-4629-8d11-e4c8d5183243/volumes" Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.462334 4800 generic.go:334] "Generic (PLEG): container finished" podID="26b822fa-6862-49f0-8faa-d58e9f258f26" containerID="6e0eea6b5aad7c15edf8cd395ff16b1489f70e04f83e2ad866551d5d5cb5bc9c" exitCode=0 Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.462398 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" event={"ID":"26b822fa-6862-49f0-8faa-d58e9f258f26","Type":"ContainerDied","Data":"6e0eea6b5aad7c15edf8cd395ff16b1489f70e04f83e2ad866551d5d5cb5bc9c"} Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.462876 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" event={"ID":"26b822fa-6862-49f0-8faa-d58e9f258f26","Type":"ContainerStarted","Data":"6772dc4035624fb39e57a23a9e2f10ffb4f2b6f5ff81514d4605f360c140514b"} Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.467374 4800 generic.go:334] "Generic (PLEG): container finished" podID="b4989247-b745-4920-8183-da703e82f2cd" containerID="121af9e1f940808bdd8fe72af0ed9bad824d04db12cb6ed69329cdf7f86dd94e" exitCode=0 Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.467428 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" event={"ID":"b4989247-b745-4920-8183-da703e82f2cd","Type":"ContainerDied","Data":"121af9e1f940808bdd8fe72af0ed9bad824d04db12cb6ed69329cdf7f86dd94e"} Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.467469 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" event={"ID":"b4989247-b745-4920-8183-da703e82f2cd","Type":"ContainerStarted","Data":"c4978ef131bab05bfb7bc18b8c6ff03cb71cd040be22a54482fcbdbda7f2b8e7"} Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.547293 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-wfdg4"] Nov 25 18:15:02 crc kubenswrapper[4800]: I1125 18:15:02.557187 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8ctvq/crc-debug-wfdg4"] Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.578458 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.690080 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g244g\" (UniqueName: \"kubernetes.io/projected/b4989247-b745-4920-8183-da703e82f2cd-kube-api-access-g244g\") pod \"b4989247-b745-4920-8183-da703e82f2cd\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.690817 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b4989247-b745-4920-8183-da703e82f2cd-host\") pod \"b4989247-b745-4920-8183-da703e82f2cd\" (UID: \"b4989247-b745-4920-8183-da703e82f2cd\") " Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.691402 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b4989247-b745-4920-8183-da703e82f2cd-host" (OuterVolumeSpecName: "host") pod "b4989247-b745-4920-8183-da703e82f2cd" (UID: "b4989247-b745-4920-8183-da703e82f2cd"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.691756 4800 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b4989247-b745-4920-8183-da703e82f2cd-host\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.711046 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4989247-b745-4920-8183-da703e82f2cd-kube-api-access-g244g" (OuterVolumeSpecName: "kube-api-access-g244g") pod "b4989247-b745-4920-8183-da703e82f2cd" (UID: "b4989247-b745-4920-8183-da703e82f2cd"). InnerVolumeSpecName "kube-api-access-g244g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.793018 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g244g\" (UniqueName: \"kubernetes.io/projected/b4989247-b745-4920-8183-da703e82f2cd-kube-api-access-g244g\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.799028 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4989247-b745-4920-8183-da703e82f2cd" path="/var/lib/kubelet/pods/b4989247-b745-4920-8183-da703e82f2cd/volumes" Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.867677 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.996498 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvm99\" (UniqueName: \"kubernetes.io/projected/26b822fa-6862-49f0-8faa-d58e9f258f26-kube-api-access-tvm99\") pod \"26b822fa-6862-49f0-8faa-d58e9f258f26\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.996690 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26b822fa-6862-49f0-8faa-d58e9f258f26-secret-volume\") pod \"26b822fa-6862-49f0-8faa-d58e9f258f26\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.997490 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26b822fa-6862-49f0-8faa-d58e9f258f26-config-volume\") pod \"26b822fa-6862-49f0-8faa-d58e9f258f26\" (UID: \"26b822fa-6862-49f0-8faa-d58e9f258f26\") " Nov 25 18:15:03 crc kubenswrapper[4800]: I1125 18:15:03.998410 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26b822fa-6862-49f0-8faa-d58e9f258f26-config-volume" (OuterVolumeSpecName: "config-volume") pod "26b822fa-6862-49f0-8faa-d58e9f258f26" (UID: "26b822fa-6862-49f0-8faa-d58e9f258f26"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.000485 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26b822fa-6862-49f0-8faa-d58e9f258f26-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "26b822fa-6862-49f0-8faa-d58e9f258f26" (UID: "26b822fa-6862-49f0-8faa-d58e9f258f26"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.002006 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26b822fa-6862-49f0-8faa-d58e9f258f26-kube-api-access-tvm99" (OuterVolumeSpecName: "kube-api-access-tvm99") pod "26b822fa-6862-49f0-8faa-d58e9f258f26" (UID: "26b822fa-6862-49f0-8faa-d58e9f258f26"). InnerVolumeSpecName "kube-api-access-tvm99". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.079873 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vw48p"] Nov 25 18:15:04 crc kubenswrapper[4800]: E1125 18:15:04.080466 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26b822fa-6862-49f0-8faa-d58e9f258f26" containerName="collect-profiles" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.080494 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="26b822fa-6862-49f0-8faa-d58e9f258f26" containerName="collect-profiles" Nov 25 18:15:04 crc kubenswrapper[4800]: E1125 18:15:04.080516 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4989247-b745-4920-8183-da703e82f2cd" containerName="container-00" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.080525 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4989247-b745-4920-8183-da703e82f2cd" containerName="container-00" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.080791 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4989247-b745-4920-8183-da703e82f2cd" containerName="container-00" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.080823 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="26b822fa-6862-49f0-8faa-d58e9f258f26" containerName="collect-profiles" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.082507 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.097667 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vw48p"] Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.101973 4800 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26b822fa-6862-49f0-8faa-d58e9f258f26-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.102008 4800 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26b822fa-6862-49f0-8faa-d58e9f258f26-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.102021 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvm99\" (UniqueName: \"kubernetes.io/projected/26b822fa-6862-49f0-8faa-d58e9f258f26-kube-api-access-tvm99\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.203622 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/880af8be-a38e-4540-ae90-f0bf16724c49-catalog-content\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.203897 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/880af8be-a38e-4540-ae90-f0bf16724c49-utilities\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.203972 4800 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phwhv\" (UniqueName: \"kubernetes.io/projected/880af8be-a38e-4540-ae90-f0bf16724c49-kube-api-access-phwhv\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.305827 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/880af8be-a38e-4540-ae90-f0bf16724c49-utilities\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.305893 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phwhv\" (UniqueName: \"kubernetes.io/projected/880af8be-a38e-4540-ae90-f0bf16724c49-kube-api-access-phwhv\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.306009 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/880af8be-a38e-4540-ae90-f0bf16724c49-catalog-content\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.306564 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/880af8be-a38e-4540-ae90-f0bf16724c49-catalog-content\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.306596 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/880af8be-a38e-4540-ae90-f0bf16724c49-utilities\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.333629 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phwhv\" (UniqueName: \"kubernetes.io/projected/880af8be-a38e-4540-ae90-f0bf16724c49-kube-api-access-phwhv\") pod \"certified-operators-vw48p\" (UID: \"880af8be-a38e-4540-ae90-f0bf16724c49\") " pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.404318 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.489131 4800 scope.go:117] "RemoveContainer" containerID="121af9e1f940808bdd8fe72af0ed9bad824d04db12cb6ed69329cdf7f86dd94e" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.489504 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8ctvq/crc-debug-wfdg4" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.498447 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" event={"ID":"26b822fa-6862-49f0-8faa-d58e9f258f26","Type":"ContainerDied","Data":"6772dc4035624fb39e57a23a9e2f10ffb4f2b6f5ff81514d4605f360c140514b"} Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.498484 4800 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6772dc4035624fb39e57a23a9e2f10ffb4f2b6f5ff81514d4605f360c140514b" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.498531 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-w8gnc" Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.916873 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vw48p"] Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.946221 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2"] Nov 25 18:15:04 crc kubenswrapper[4800]: I1125 18:15:04.957200 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-5mjz2"] Nov 25 18:15:05 crc kubenswrapper[4800]: I1125 18:15:05.512095 4800 generic.go:334] "Generic (PLEG): container finished" podID="880af8be-a38e-4540-ae90-f0bf16724c49" containerID="cfdf0d7deb2b319059e48ab1a0e77725ba2a6a879e1db562aaaca263cd5e8275" exitCode=0 Nov 25 18:15:05 crc kubenswrapper[4800]: I1125 18:15:05.512155 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vw48p" event={"ID":"880af8be-a38e-4540-ae90-f0bf16724c49","Type":"ContainerDied","Data":"cfdf0d7deb2b319059e48ab1a0e77725ba2a6a879e1db562aaaca263cd5e8275"} Nov 25 18:15:05 crc kubenswrapper[4800]: I1125 18:15:05.512672 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vw48p" event={"ID":"880af8be-a38e-4540-ae90-f0bf16724c49","Type":"ContainerStarted","Data":"f1f9019162ca9c23e3ca283112f67aacdf767caa688b8bd52752075210ff9483"} Nov 25 18:15:05 crc kubenswrapper[4800]: I1125 18:15:05.796482 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05835060-0875-43ba-ab10-5d437d6d5a40" path="/var/lib/kubelet/pods/05835060-0875-43ba-ab10-5d437d6d5a40/volumes" Nov 25 18:15:11 crc kubenswrapper[4800]: I1125 18:15:11.569754 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vw48p" event={"ID":"880af8be-a38e-4540-ae90-f0bf16724c49","Type":"ContainerStarted","Data":"ac911caa3a5d88ba564cf3999a848d0baa887cfbb4548653a473ec1e9de8641a"} Nov 25 18:15:12 crc kubenswrapper[4800]: I1125 18:15:12.582791 4800 generic.go:334] "Generic (PLEG): container finished" podID="880af8be-a38e-4540-ae90-f0bf16724c49" containerID="ac911caa3a5d88ba564cf3999a848d0baa887cfbb4548653a473ec1e9de8641a" exitCode=0 Nov 25 18:15:12 crc kubenswrapper[4800]: I1125 18:15:12.582896 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vw48p" event={"ID":"880af8be-a38e-4540-ae90-f0bf16724c49","Type":"ContainerDied","Data":"ac911caa3a5d88ba564cf3999a848d0baa887cfbb4548653a473ec1e9de8641a"} Nov 25 18:15:14 crc kubenswrapper[4800]: I1125 18:15:14.603781 4800 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vw48p" event={"ID":"880af8be-a38e-4540-ae90-f0bf16724c49","Type":"ContainerStarted","Data":"5529ddbdfa936f9830cd62c74c9fa0ce9393e15db712caf8e024352ce0aa15d1"} Nov 25 18:15:14 crc kubenswrapper[4800]: I1125 18:15:14.629672 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vw48p" podStartSLOduration=2.646983694 podStartE2EDuration="10.629654607s" podCreationTimestamp="2025-11-25 18:15:04 +0000 UTC" firstStartedPulling="2025-11-25 18:15:05.516641093 +0000 UTC m=+10666.571049575" lastFinishedPulling="2025-11-25 18:15:13.499311996 +0000 UTC m=+10674.553720488" observedRunningTime="2025-11-25 18:15:14.624172698 +0000 UTC m=+10675.678581220" watchObservedRunningTime="2025-11-25 18:15:14.629654607 +0000 UTC m=+10675.684063089" Nov 25 18:15:24 crc kubenswrapper[4800]: I1125 18:15:24.404805 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:24 crc kubenswrapper[4800]: I1125 18:15:24.405322 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:24 crc kubenswrapper[4800]: I1125 18:15:24.457279 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:24 crc kubenswrapper[4800]: I1125 18:15:24.752661 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vw48p" Nov 25 18:15:24 crc kubenswrapper[4800]: I1125 18:15:24.823412 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vw48p"] Nov 25 18:15:24 crc kubenswrapper[4800]: I1125 18:15:24.869765 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9m5fb"] Nov 25 18:15:25 crc kubenswrapper[4800]: I1125 18:15:25.709477 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9m5fb" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="registry-server" containerID="cri-o://540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1" gracePeriod=2 Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.278218 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.435394 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-catalog-content\") pod \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.435744 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2rx5\" (UniqueName: \"kubernetes.io/projected/66a7b373-f344-4ea3-a777-7b1d1b2deaad-kube-api-access-f2rx5\") pod \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.435782 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-utilities\") pod \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\" (UID: \"66a7b373-f344-4ea3-a777-7b1d1b2deaad\") " Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.436359 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-utilities" (OuterVolumeSpecName: "utilities") pod "66a7b373-f344-4ea3-a777-7b1d1b2deaad" (UID: "66a7b373-f344-4ea3-a777-7b1d1b2deaad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.436827 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.447053 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a7b373-f344-4ea3-a777-7b1d1b2deaad-kube-api-access-f2rx5" (OuterVolumeSpecName: "kube-api-access-f2rx5") pod "66a7b373-f344-4ea3-a777-7b1d1b2deaad" (UID: "66a7b373-f344-4ea3-a777-7b1d1b2deaad"). InnerVolumeSpecName "kube-api-access-f2rx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.500896 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66a7b373-f344-4ea3-a777-7b1d1b2deaad" (UID: "66a7b373-f344-4ea3-a777-7b1d1b2deaad"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.539059 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a7b373-f344-4ea3-a777-7b1d1b2deaad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.539096 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2rx5\" (UniqueName: \"kubernetes.io/projected/66a7b373-f344-4ea3-a777-7b1d1b2deaad-kube-api-access-f2rx5\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.720311 4800 generic.go:334] "Generic (PLEG): container finished" podID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerID="540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1" exitCode=0 Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.720426 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9m5fb" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.720513 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9m5fb" event={"ID":"66a7b373-f344-4ea3-a777-7b1d1b2deaad","Type":"ContainerDied","Data":"540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1"} Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.720559 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9m5fb" event={"ID":"66a7b373-f344-4ea3-a777-7b1d1b2deaad","Type":"ContainerDied","Data":"ad57f9126cf94183367f46e3551b999e57e88768fdbf7d1635155e18587e28ad"} Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.720590 4800 scope.go:117] "RemoveContainer" containerID="540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.747110 4800 scope.go:117] "RemoveContainer" containerID="501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.759968 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9m5fb"] Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.771122 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9m5fb"] Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.785817 4800 scope.go:117] "RemoveContainer" containerID="fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.836400 4800 scope.go:117] "RemoveContainer" containerID="540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1" Nov 25 18:15:26 crc kubenswrapper[4800]: E1125 18:15:26.836877 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1\": container with ID starting with 540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1 not found: ID does not exist" containerID="540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.836916 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1"} err="failed to get container status 
\"540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1\": rpc error: code = NotFound desc = could not find container \"540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1\": container with ID starting with 540f135454b4f6c734ce99336fecbc0dd87a89d06a8b52f12c14f0c6df33e5d1 not found: ID does not exist" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.836940 4800 scope.go:117] "RemoveContainer" containerID="501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9" Nov 25 18:15:26 crc kubenswrapper[4800]: E1125 18:15:26.837406 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9\": container with ID starting with 501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9 not found: ID does not exist" containerID="501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.837425 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9"} err="failed to get container status \"501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9\": rpc error: code = NotFound desc = could not find container \"501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9\": container with ID starting with 501c5779e52c1029187a554b7ed4e2d0cfc03ba124c5046f931904fe09b406d9 not found: ID does not exist" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.837440 4800 scope.go:117] "RemoveContainer" containerID="fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6" Nov 25 18:15:26 crc kubenswrapper[4800]: E1125 18:15:26.837758 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6\": container with ID starting with fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6 not found: ID does not exist" containerID="fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6" Nov 25 18:15:26 crc kubenswrapper[4800]: I1125 18:15:26.837780 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6"} err="failed to get container status \"fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6\": rpc error: code = NotFound desc = could not find container \"fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6\": container with ID starting with fe3bed6929e2139b0085ffe736f2708c61d6013dbc2c9a7c80ff86b4927d15e6 not found: ID does not exist" Nov 25 18:15:27 crc kubenswrapper[4800]: I1125 18:15:27.799680 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" path="/var/lib/kubelet/pods/66a7b373-f344-4ea3-a777-7b1d1b2deaad/volumes" Nov 25 18:15:35 crc kubenswrapper[4800]: I1125 18:15:35.275287 4800 scope.go:117] "RemoveContainer" containerID="560cc7b9e119ae79e60c282df0b3a95b8a15a38c3400bea39688440efb1ba2de" Nov 25 18:15:42 crc kubenswrapper[4800]: I1125 18:15:42.640193 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 25 18:15:42 crc kubenswrapper[4800]: I1125 18:15:42.641010 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:15:44 crc kubenswrapper[4800]: I1125 18:15:44.955473 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-68d9dc6bf6-nhg96_320fcdb8-a11c-411f-aa8c-b0c89011b857/barbican-api/0.log" Nov 25 18:15:45 crc kubenswrapper[4800]: I1125 18:15:45.221912 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-68d9dc6bf6-nhg96_320fcdb8-a11c-411f-aa8c-b0c89011b857/barbican-api-log/0.log" Nov 25 18:15:45 crc kubenswrapper[4800]: I1125 18:15:45.271943 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5d486dc894-hwkxc_c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8/barbican-keystone-listener/0.log" Nov 25 18:15:45 crc kubenswrapper[4800]: I1125 18:15:45.504687 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7664b8b7d5-cx4ps_a975dd2f-273e-4d84-8a2b-96badfae1fdb/barbican-worker/0.log" Nov 25 18:15:45 crc kubenswrapper[4800]: I1125 18:15:45.827341 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7664b8b7d5-cx4ps_a975dd2f-273e-4d84-8a2b-96badfae1fdb/barbican-worker-log/0.log" Nov 25 18:15:45 crc kubenswrapper[4800]: I1125 18:15:45.857779 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-r8n2m_0316ba41-9805-4c20-ace9-757468989756/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.053233 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2bcf406e-1184-44de-a565-974dd28d1256/ceilometer-central-agent/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.067982 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2bcf406e-1184-44de-a565-974dd28d1256/ceilometer-notification-agent/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.121174 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5d486dc894-hwkxc_c9cf92a2-5fd9-40ab-81a4-4a88d5e4bcf8/barbican-keystone-listener-log/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.228076 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2bcf406e-1184-44de-a565-974dd28d1256/proxy-httpd/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.266028 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2bcf406e-1184-44de-a565-974dd28d1256/sg-core/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.297958 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-j4ptq_5f049eb4-684a-4deb-8305-37d851e0431c/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.455409 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-tfd8l_5673027c-e855-4cd8-8ac8-ebbb5b6f0fee/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 
25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.683256 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_f9044ea8-7c07-4552-b140-6545060d3f53/cinder-api/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.755587 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_f9044ea8-7c07-4552-b140-6545060d3f53/cinder-api-log/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.892069 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_7be6b551-566a-410c-b8f9-892dee455826/cinder-backup/0.log" Nov 25 18:15:46 crc kubenswrapper[4800]: I1125 18:15:46.961047 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_7be6b551-566a-410c-b8f9-892dee455826/probe/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.017379 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_b3409070-5204-4027-b692-201d89bbb758/cinder-scheduler/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.204776 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_b3409070-5204-4027-b692-201d89bbb758/probe/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.339746 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_9418bcee-6bf4-4758-9ffc-ce6945012a4e/probe/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.372757 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_9418bcee-6bf4-4758-9ffc-ce6945012a4e/cinder-volume/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.502673 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-zdlbq_92286c54-cedd-4519-9b0c-f72e6b79984d/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.668333 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-cpr2g_4d64fe72-409b-48a7-88a0-0a35d4c86918/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.734027 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78f48d6b7c-6g2g2_9f91611c-ae1b-460f-a9f2-a44e2cae6143/init/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.946629 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78f48d6b7c-6g2g2_9f91611c-ae1b-460f-a9f2-a44e2cae6143/init/0.log" Nov 25 18:15:47 crc kubenswrapper[4800]: I1125 18:15:47.992295 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_1771d537-769e-4578-9722-3131ffc1f447/glance-httpd/0.log" Nov 25 18:15:48 crc kubenswrapper[4800]: I1125 18:15:48.183423 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78f48d6b7c-6g2g2_9f91611c-ae1b-460f-a9f2-a44e2cae6143/dnsmasq-dns/0.log" Nov 25 18:15:48 crc kubenswrapper[4800]: I1125 18:15:48.214599 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_1771d537-769e-4578-9722-3131ffc1f447/glance-log/0.log" Nov 25 18:15:48 crc kubenswrapper[4800]: I1125 18:15:48.307388 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-internal-api-0_89177b29-2825-42dd-9746-b1b33ed4e205/glance-httpd/0.log" Nov 25 18:15:48 crc kubenswrapper[4800]: I1125 18:15:48.409086 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_89177b29-2825-42dd-9746-b1b33ed4e205/glance-log/0.log" Nov 25 18:15:48 crc kubenswrapper[4800]: I1125 18:15:48.645933 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-56fb8dbc98-w4xzj_bc0a04ce-9c18-468e-a9bb-7f8ab46f176d/horizon/0.log" Nov 25 18:15:48 crc kubenswrapper[4800]: I1125 18:15:48.880482 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-ngs4p_3c6f9a82-e6dc-4bb9-af5e-86f7c71871b1/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:49 crc kubenswrapper[4800]: I1125 18:15:49.034422 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-4dch6_43707459-1078-4789-9cb5-b40d41b41d97/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:49 crc kubenswrapper[4800]: I1125 18:15:49.296987 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401441-6vqq9_6e97e4f5-608b-41f3-94c0-bee108e519ea/keystone-cron/0.log" Nov 25 18:15:49 crc kubenswrapper[4800]: I1125 18:15:49.481522 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401501-ws7bm_3e5c3bd3-4074-4f19-8810-2c93766f0f76/keystone-cron/0.log" Nov 25 18:15:49 crc kubenswrapper[4800]: I1125 18:15:49.680085 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401561-9hv22_6a98967c-9e7a-4ada-8c02-da06d025d5c2/keystone-cron/0.log" Nov 25 18:15:49 crc kubenswrapper[4800]: I1125 18:15:49.813824 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_26eed0ca-ff5b-4fd3-9e2c-8a57ed553e2a/kube-state-metrics/0.log" Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.004174 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-czdww_631c307a-96b7-4e9f-829d-2652277cbea1/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.241474 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_2533dec9-48e6-4f7e-8d9e-d90e5db00418/manila-api-log/0.log" Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.378142 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_2533dec9-48e6-4f7e-8d9e-d90e5db00418/manila-api/0.log" Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.613322 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_ff783144-a701-4dd1-b275-89049f1e49d1/probe/0.log" Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.650690 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_ff783144-a701-4dd1-b275-89049f1e49d1/manila-scheduler/0.log" Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.777502 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-56fb8dbc98-w4xzj_bc0a04ce-9c18-468e-a9bb-7f8ab46f176d/horizon-log/0.log" Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.890616 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_c50c89bb-6472-4178-a4db-32b109ae9847/probe/0.log" 
Nov 25 18:15:50 crc kubenswrapper[4800]: I1125 18:15:50.991653 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_c50c89bb-6472-4178-a4db-32b109ae9847/manila-share/0.log" Nov 25 18:15:51 crc kubenswrapper[4800]: I1125 18:15:51.574775 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-7b7dg_3d4ff997-b0ab-44c1-8d74-3c326d41863d/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:52 crc kubenswrapper[4800]: I1125 18:15:52.267587 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-97869bf49-tc9dz_331a354f-72b7-47a2-8cd4-212972eada6b/neutron-httpd/0.log" Nov 25 18:15:52 crc kubenswrapper[4800]: I1125 18:15:52.306641 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7dfbf776bb-kgx2k_52ce962a-182b-48b4-96ee-225161f70f29/keystone-api/0.log" Nov 25 18:15:52 crc kubenswrapper[4800]: I1125 18:15:52.943799 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-97869bf49-tc9dz_331a354f-72b7-47a2-8cd4-212972eada6b/neutron-api/0.log" Nov 25 18:15:53 crc kubenswrapper[4800]: I1125 18:15:53.253910 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_b71b2770-3cf3-4621-880f-e8e39e94771d/nova-cell0-conductor-conductor/0.log" Nov 25 18:15:53 crc kubenswrapper[4800]: I1125 18:15:53.520125 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_3046bf0c-3466-4a9a-9c78-84b1b5f8d164/nova-cell1-conductor-conductor/0.log" Nov 25 18:15:54 crc kubenswrapper[4800]: I1125 18:15:54.275545 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_418ea34b-91e7-4bed-852c-2856c9c414d1/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 18:15:54 crc kubenswrapper[4800]: I1125 18:15:54.572498 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-ljxt9_a04be264-1764-4ff6-b676-688fdb0ced55/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:54 crc kubenswrapper[4800]: I1125 18:15:54.885425 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9192bcbd-cbd0-4697-b97a-ccbd71fccb54/nova-metadata-log/0.log" Nov 25 18:15:55 crc kubenswrapper[4800]: I1125 18:15:55.922350 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_6b5fce4f-89c0-47c1-a5c5-c4a86406502d/nova-scheduler-scheduler/0.log" Nov 25 18:15:55 crc kubenswrapper[4800]: I1125 18:15:55.986254 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9/nova-api-log/0.log" Nov 25 18:15:56 crc kubenswrapper[4800]: I1125 18:15:56.183962 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0028cc49-034e-4ff3-99c1-7c13bb298646/mysql-bootstrap/0.log" Nov 25 18:15:56 crc kubenswrapper[4800]: I1125 18:15:56.437416 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0028cc49-034e-4ff3-99c1-7c13bb298646/mysql-bootstrap/0.log" Nov 25 18:15:56 crc kubenswrapper[4800]: I1125 18:15:56.490135 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0028cc49-034e-4ff3-99c1-7c13bb298646/galera/0.log" Nov 25 18:15:56 crc kubenswrapper[4800]: I1125 18:15:56.695791 4800 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_openstack-galera-0_6b4060af-fd4c-49d5-980e-a496a2fcfbd5/mysql-bootstrap/0.log" Nov 25 18:15:56 crc kubenswrapper[4800]: I1125 18:15:56.869746 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_6b4060af-fd4c-49d5-980e-a496a2fcfbd5/mysql-bootstrap/0.log" Nov 25 18:15:56 crc kubenswrapper[4800]: I1125 18:15:56.954239 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_6b4060af-fd4c-49d5-980e-a496a2fcfbd5/galera/0.log" Nov 25 18:15:57 crc kubenswrapper[4800]: I1125 18:15:57.178831 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_504ea677-7f03-49bd-a420-ab472ab48709/openstackclient/0.log" Nov 25 18:15:57 crc kubenswrapper[4800]: I1125 18:15:57.388240 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-jmbtv_f0140b9d-bed7-44ae-a1d5-8e0acdb70742/ovn-controller/0.log" Nov 25 18:15:57 crc kubenswrapper[4800]: I1125 18:15:57.567684 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-7jcg4_b7276fee-e7c9-4661-bc71-c1d2a4d4593e/openstack-network-exporter/0.log" Nov 25 18:15:57 crc kubenswrapper[4800]: I1125 18:15:57.718183 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_30b2fb06-e3c2-4a1d-bd5c-440248ffb8c9/nova-api-api/0.log" Nov 25 18:15:57 crc kubenswrapper[4800]: I1125 18:15:57.829384 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tklmv_1ead9ef3-a389-45a2-a1be-0b1d07116fde/ovsdb-server-init/0.log" Nov 25 18:15:57 crc kubenswrapper[4800]: I1125 18:15:57.983157 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tklmv_1ead9ef3-a389-45a2-a1be-0b1d07116fde/ovsdb-server-init/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.027787 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tklmv_1ead9ef3-a389-45a2-a1be-0b1d07116fde/ovsdb-server/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.101896 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tklmv_1ead9ef3-a389-45a2-a1be-0b1d07116fde/ovs-vswitchd/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.328830 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-4mt25_f1b959a3-4fef-48f0-8562-861d6acd7b9c/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.470814 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2ba620e1-8d84-4ce2-acca-5d0b2df703d0/openstack-network-exporter/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.568613 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2ba620e1-8d84-4ce2-acca-5d0b2df703d0/ovn-northd/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.687869 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_73dd3869-2591-41f2-8164-004d29e14e44/openstack-network-exporter/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.743404 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_73dd3869-2591-41f2-8164-004d29e14e44/ovsdbserver-nb/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.945182 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-0_9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c/ovsdbserver-sb/0.log" Nov 25 18:15:58 crc kubenswrapper[4800]: I1125 18:15:58.981023 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_9d7ac5c2-e9e5-4f6f-b992-0e752f34a33c/openstack-network-exporter/0.log" Nov 25 18:15:59 crc kubenswrapper[4800]: I1125 18:15:59.590291 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f33262f7-29fd-4207-b465-558a4027c20a/setup-container/0.log" Nov 25 18:15:59 crc kubenswrapper[4800]: I1125 18:15:59.701593 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c9d4bc54d-drmz2_ec5af7dd-2606-4607-b136-51a82b3e4ad8/placement-api/0.log" Nov 25 18:15:59 crc kubenswrapper[4800]: I1125 18:15:59.975553 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f33262f7-29fd-4207-b465-558a4027c20a/setup-container/0.log" Nov 25 18:15:59 crc kubenswrapper[4800]: I1125 18:15:59.989981 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c9d4bc54d-drmz2_ec5af7dd-2606-4607-b136-51a82b3e4ad8/placement-log/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.125196 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f33262f7-29fd-4207-b465-558a4027c20a/rabbitmq/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.157601 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_46c6538a-1632-4c14-9ef6-3a3e4a15c3d4/setup-container/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.405276 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_46c6538a-1632-4c14-9ef6-3a3e4a15c3d4/setup-container/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.408900 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9192bcbd-cbd0-4697-b97a-ccbd71fccb54/nova-metadata-metadata/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.550057 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_46c6538a-1632-4c14-9ef6-3a3e4a15c3d4/rabbitmq/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.645739 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-z7864_44834c3e-e154-47ea-9c26-62f7f7ee5cb8/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.802047 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-dh5hv_abeab1aa-d713-443d-b487-9a59f90d161a/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:16:00 crc kubenswrapper[4800]: I1125 18:16:00.824363 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-qwbdv_932a8ec2-09ef-4b4e-8dc1-0a2342efb164/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:16:01 crc kubenswrapper[4800]: I1125 18:16:01.036132 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-ddspx_1c5fea3a-9dfb-4d9f-8401-d9769c59d563/ssh-known-hosts-edpm-deployment/0.log" Nov 25 18:16:01 crc kubenswrapper[4800]: I1125 18:16:01.229294 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_tempest-tests-tempest-s00-full_9f498125-ffd2-4526-8234-3e89d84f5753/tempest-tests-tempest-tests-runner/0.log" Nov 25 18:16:01 crc kubenswrapper[4800]: I1125 18:16:01.329940 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest-s01-single-test_2d3475e1-4d5f-4a7e-b8d1-e482ef1e7b0f/tempest-tests-tempest-tests-runner/0.log" Nov 25 18:16:01 crc kubenswrapper[4800]: I1125 18:16:01.394000 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_e5062a21-fde6-4339-87c9-268d93f7b2a1/test-operator-logs-container/0.log" Nov 25 18:16:01 crc kubenswrapper[4800]: I1125 18:16:01.543434 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tobiko-tobiko-tests-tobiko_7e8bfbb6-72be-47cb-bde0-3fc5d7264e02/test-operator-logs-container/0.log" Nov 25 18:16:01 crc kubenswrapper[4800]: I1125 18:16:01.644165 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tobiko-tests-tobiko-s00-podified-functional_1b0af985-22c6-472c-99fd-aa42bee61e14/tobiko-tests-tobiko/0.log" Nov 25 18:16:01 crc kubenswrapper[4800]: I1125 18:16:01.753612 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tobiko-tests-tobiko-s01-sanity_b25425bd-52d1-42fe-837a-99f02547084f/tobiko-tests-tobiko/0.log" Nov 25 18:16:02 crc kubenswrapper[4800]: I1125 18:16:02.342217 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-pf5cd_4c1aa378-a5fb-4c41-b773-e77118db1abe/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 18:16:09 crc kubenswrapper[4800]: I1125 18:16:09.069973 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_f6fe0af4-a236-4346-a806-8601ecaa33b6/memcached/0.log" Nov 25 18:16:12 crc kubenswrapper[4800]: I1125 18:16:12.640553 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:16:12 crc kubenswrapper[4800]: I1125 18:16:12.641075 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:16:28 crc kubenswrapper[4800]: I1125 18:16:28.803000 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-d4svd_959b58dd-55f0-4f7a-aa2e-24a868241ebe/kube-rbac-proxy/0.log" Nov 25 18:16:28 crc kubenswrapper[4800]: I1125 18:16:28.865274 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-d4svd_959b58dd-55f0-4f7a-aa2e-24a868241ebe/manager/0.log" Nov 25 18:16:28 crc kubenswrapper[4800]: I1125 18:16:28.977391 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j_c29c04aa-31cb-498e-b976-ce6f9d381ba2/util/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.219659 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j_c29c04aa-31cb-498e-b976-ce6f9d381ba2/util/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.225337 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j_c29c04aa-31cb-498e-b976-ce6f9d381ba2/pull/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.229128 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j_c29c04aa-31cb-498e-b976-ce6f9d381ba2/pull/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.448121 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j_c29c04aa-31cb-498e-b976-ce6f9d381ba2/extract/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.489971 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j_c29c04aa-31cb-498e-b976-ce6f9d381ba2/util/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.497873 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fggd2j_c29c04aa-31cb-498e-b976-ce6f9d381ba2/pull/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.672099 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-gnfx4_05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec/kube-rbac-proxy/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.705785 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-gnfx4_05bb3a64-18f5-4b8d-bf4f-f46c5ba6c0ec/manager/0.log" Nov 25 18:16:29 crc kubenswrapper[4800]: I1125 18:16:29.916570 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-vdqnx_8976a97d-112c-4d56-b82f-74648f987a62/kube-rbac-proxy/0.log" Nov 25 18:16:30 crc kubenswrapper[4800]: I1125 18:16:30.007475 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-vdqnx_8976a97d-112c-4d56-b82f-74648f987a62/manager/0.log" Nov 25 18:16:30 crc kubenswrapper[4800]: I1125 18:16:30.081589 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-6bztx_b3ae53a0-88c1-4617-8052-f95d3b6d78d3/kube-rbac-proxy/0.log" Nov 25 18:16:30 crc kubenswrapper[4800]: I1125 18:16:30.144614 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-6bztx_b3ae53a0-88c1-4617-8052-f95d3b6d78d3/manager/0.log" Nov 25 18:16:30 crc kubenswrapper[4800]: I1125 18:16:30.243529 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-qfdl8_cb7f9b0c-c801-4935-8d52-02179a0cfed0/kube-rbac-proxy/0.log" Nov 25 18:16:30 crc kubenswrapper[4800]: I1125 18:16:30.283792 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-qfdl8_cb7f9b0c-c801-4935-8d52-02179a0cfed0/manager/0.log" Nov 25 18:16:30 crc kubenswrapper[4800]: I1125 
18:16:30.406917 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-7kp74_e158909e-b254-40c0-95a8-9d5056889e6a/kube-rbac-proxy/0.log" Nov 25 18:16:30 crc kubenswrapper[4800]: I1125 18:16:30.435501 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-7kp74_e158909e-b254-40c0-95a8-9d5056889e6a/manager/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.297203 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-ksrss_f2d7618e-4f44-4ad7-b381-26039921a683/kube-rbac-proxy/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.455875 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-ksrss_f2d7618e-4f44-4ad7-b381-26039921a683/manager/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.522151 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-ng4ng_1d4a540a-f8e3-4566-9d9f-05b2b5e26399/kube-rbac-proxy/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.550364 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-ng4ng_1d4a540a-f8e3-4566-9d9f-05b2b5e26399/manager/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.702715 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-6clsb_c13855f7-d2e2-4a35-a7f0-2fe506ad36a5/kube-rbac-proxy/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.774385 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-6clsb_c13855f7-d2e2-4a35-a7f0-2fe506ad36a5/manager/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.917478 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-r79jq_4682bc2d-38c7-4001-8dd8-095f444caa42/kube-rbac-proxy/0.log" Nov 25 18:16:31 crc kubenswrapper[4800]: I1125 18:16:31.991998 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-r79jq_4682bc2d-38c7-4001-8dd8-095f444caa42/manager/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.012290 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-865xl_bf54f59d-2a26-4502-bb7d-b9aeabeb1645/kube-rbac-proxy/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.117931 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-865xl_bf54f59d-2a26-4502-bb7d-b9aeabeb1645/manager/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.190966 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-9mbm7_a206eabc-2689-4dc2-ac1a-066100be9382/kube-rbac-proxy/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.305202 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-9mbm7_a206eabc-2689-4dc2-ac1a-066100be9382/manager/0.log" Nov 25 18:16:32 crc 
kubenswrapper[4800]: I1125 18:16:32.371931 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-brcmf_671d7b5e-65d1-4a29-9ef6-fd0e770203c5/kube-rbac-proxy/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.481709 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-brcmf_671d7b5e-65d1-4a29-9ef6-fd0e770203c5/manager/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.826217 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-5xcgj_16bad9b7-305a-4081-a7f5-671fd1a51f31/manager/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.848072 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-5xcgj_16bad9b7-305a-4081-a7f5-671fd1a51f31/kube-rbac-proxy/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.874292 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-b58f89467-5w9fm_e09fc035-4c04-486d-b4e7-6638d278c1d6/manager/0.log" Nov 25 18:16:32 crc kubenswrapper[4800]: I1125 18:16:32.891572 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-b58f89467-5w9fm_e09fc035-4c04-486d-b4e7-6638d278c1d6/kube-rbac-proxy/0.log" Nov 25 18:16:33 crc kubenswrapper[4800]: I1125 18:16:33.130499 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5ff895d7c-qbck9_af1e8320-14a4-4a15-b29c-39a09ce9bfb9/operator/0.log" Nov 25 18:16:33 crc kubenswrapper[4800]: I1125 18:16:33.415583 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-2fb8f_19a270d9-7165-4dae-942a-5a6daa2cf905/kube-rbac-proxy/0.log" Nov 25 18:16:33 crc kubenswrapper[4800]: I1125 18:16:33.420946 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-2fb8f_19a270d9-7165-4dae-942a-5a6daa2cf905/manager/0.log" Nov 25 18:16:33 crc kubenswrapper[4800]: I1125 18:16:33.459192 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6997m_8eb527be-d4e7-4803-8ce9-88201bb4e17e/registry-server/0.log" Nov 25 18:16:33 crc kubenswrapper[4800]: I1125 18:16:33.617361 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-vhrgb_b547724f-2a34-47b0-9125-668496d7dc6d/manager/0.log" Nov 25 18:16:33 crc kubenswrapper[4800]: I1125 18:16:33.869796 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-vhrgb_b547724f-2a34-47b0-9125-668496d7dc6d/kube-rbac-proxy/0.log" Nov 25 18:16:33 crc kubenswrapper[4800]: I1125 18:16:33.965507 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-wbx2v_762dc32c-7527-4ab4-a5fc-b7780e6da7d2/operator/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.108295 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-gt68p_fbc462d8-f085-4ffc-af8c-b91677ff3619/kube-rbac-proxy/0.log" 
Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.200648 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-gt68p_fbc462d8-f085-4ffc-af8c-b91677ff3619/manager/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.265188 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-pbs6h_e9539fdf-f01c-42c5-89a2-681d5c6142b4/kube-rbac-proxy/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.432447 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69c4569b4-2wcmd_8394c97f-b95f-41cd-8baa-b6bdb3a2219a/kube-rbac-proxy/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.434390 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7cd5954d9-hh5m4_5a433244-abb3-4d43-a2b3-3266fd7234c0/manager/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.460703 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-pbs6h_e9539fdf-f01c-42c5-89a2-681d5c6142b4/manager/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.516735 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69c4569b4-2wcmd_8394c97f-b95f-41cd-8baa-b6bdb3a2219a/manager/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.635043 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-95x9b_71dd46d2-b3b0-4999-800c-03ac0a9758c6/kube-rbac-proxy/0.log" Nov 25 18:16:34 crc kubenswrapper[4800]: I1125 18:16:34.651929 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-95x9b_71dd46d2-b3b0-4999-800c-03ac0a9758c6/manager/0.log" Nov 25 18:16:42 crc kubenswrapper[4800]: I1125 18:16:42.640202 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:16:42 crc kubenswrapper[4800]: I1125 18:16:42.640652 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:16:42 crc kubenswrapper[4800]: I1125 18:16:42.640710 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 18:16:42 crc kubenswrapper[4800]: I1125 18:16:42.641605 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a8c4ea772f008841729175b31f60a576a8aa05aa7616cac9253311e58f50fab7"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:16:42 crc kubenswrapper[4800]: I1125 18:16:42.641707 4800 kuberuntime_container.go:808] "Killing container with a grace 
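The five records just above are one complete liveness cycle: the prober's HTTP GET to 127.0.0.1:8798/health is refused, the failure is reported through the sync loop, and the container is killed with a 600-second grace period so it can be restarted. A minimal Go sketch of the check being performed; the endpoint and port come from the pod's livenessProbe spec (not part of this log), and Kubernetes treats any HTTP status in [200,400) as success:

// probecheck.go - illustrative only; mirrors the HTTP liveness check
// whose failure is logged above.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func livenessCheck(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
		return fmt.Errorf("probe failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil // healthy
	}
	return fmt.Errorf("probe failed: unexpected status %d", resp.StatusCode)
}

func main() {
	if err := livenessCheck("http://127.0.0.1:8798/health", time.Second); err != nil {
		fmt.Println(err)
	}
}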
period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://a8c4ea772f008841729175b31f60a576a8aa05aa7616cac9253311e58f50fab7" gracePeriod=600 Nov 25 18:16:43 crc kubenswrapper[4800]: I1125 18:16:43.447315 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="a8c4ea772f008841729175b31f60a576a8aa05aa7616cac9253311e58f50fab7" exitCode=0 Nov 25 18:16:43 crc kubenswrapper[4800]: I1125 18:16:43.447413 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"a8c4ea772f008841729175b31f60a576a8aa05aa7616cac9253311e58f50fab7"} Nov 25 18:16:43 crc kubenswrapper[4800]: I1125 18:16:43.447695 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30"} Nov 25 18:16:43 crc kubenswrapper[4800]: I1125 18:16:43.447717 4800 scope.go:117] "RemoveContainer" containerID="e63c18b910ec9350e6bca6395dcaf098339cdf273a0276d89a999a2cc62eaebf" Nov 25 18:16:53 crc kubenswrapper[4800]: I1125 18:16:53.275353 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-l7wcv_dd18f589-9ad4-4626-962c-11632f7750ec/control-plane-machine-set-operator/0.log" Nov 25 18:16:53 crc kubenswrapper[4800]: I1125 18:16:53.379366 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-gjqqh_a451496e-aec1-4381-916e-d9875d29dbd2/kube-rbac-proxy/0.log" Nov 25 18:16:53 crc kubenswrapper[4800]: I1125 18:16:53.420678 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-gjqqh_a451496e-aec1-4381-916e-d9875d29dbd2/machine-api-operator/0.log" Nov 25 18:17:06 crc kubenswrapper[4800]: I1125 18:17:06.426971 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-mhrc8_ee4fea7b-32ed-4315-8b7b-9bafd32a6ebb/cert-manager-controller/0.log" Nov 25 18:17:06 crc kubenswrapper[4800]: I1125 18:17:06.658365 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-z579p_c42ac87c-4158-4d13-99f5-634729b126dd/cert-manager-webhook/0.log" Nov 25 18:17:06 crc kubenswrapper[4800]: I1125 18:17:06.732107 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-24s7f_68f1e02e-3adc-4214-bcce-8d3fea0e02ef/cert-manager-cainjector/0.log" Nov 25 18:17:19 crc kubenswrapper[4800]: I1125 18:17:19.934957 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-krpt4_3d4b604f-f606-4565-9094-d61a6c3275f1/nmstate-console-plugin/0.log" Nov 25 18:17:20 crc kubenswrapper[4800]: I1125 18:17:20.001830 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-gbvvs_b4293598-b73e-407c-a146-3dcc03673ff6/nmstate-handler/0.log" Nov 25 18:17:20 crc kubenswrapper[4800]: I1125 18:17:20.107583 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fqbmm_6120d1d1-7abc-494b-8e0f-da1ac9b5324f/kube-rbac-proxy/0.log" Nov 25 18:17:20 crc kubenswrapper[4800]: I1125 18:17:20.116466 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fqbmm_6120d1d1-7abc-494b-8e0f-da1ac9b5324f/nmstate-metrics/0.log" Nov 25 18:17:20 crc kubenswrapper[4800]: I1125 18:17:20.268118 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-cmjwd_b0d67bab-8969-4b12-a7f1-e37e02e45afa/nmstate-operator/0.log" Nov 25 18:17:20 crc kubenswrapper[4800]: I1125 18:17:20.297612 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-629l4_ee71ff08-0d60-4527-8892-b804feba7c02/nmstate-webhook/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.248487 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-snhhr_a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8/kube-rbac-proxy/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.369783 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-snhhr_a9e4cf5b-2e7f-448e-8e0a-6bd74b29b0d8/controller/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.434994 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-frr-files/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.584670 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-reloader/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.603031 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-metrics/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.639415 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-reloader/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.675783 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-frr-files/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.778056 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-reloader/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.795991 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-frr-files/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.869537 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-metrics/0.log" Nov 25 18:17:35 crc kubenswrapper[4800]: I1125 18:17:35.901408 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-metrics/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.011579 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-frr-files/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.052606 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-metrics/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.064473 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/cp-reloader/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.125681 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/controller/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.256278 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/kube-rbac-proxy/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.262829 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/frr-metrics/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.337480 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/kube-rbac-proxy-frr/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.529786 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-p7rt2_05323c34-8333-474b-9713-a1b20ea27b72/frr-k8s-webhook-server/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.546963 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/reloader/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.770194 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-667b8c5d74-psh88_3b8101e4-3103-4602-ba9d-8a43d88566e6/manager/0.log" Nov 25 18:17:36 crc kubenswrapper[4800]: I1125 18:17:36.922193 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7c85cdc97d-mz6j5_8d07d578-289f-40fa-9e41-fc065151089c/webhook-server/0.log" Nov 25 18:17:37 crc kubenswrapper[4800]: I1125 18:17:37.032122 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-r7gdx_74dd80b5-113a-476d-8f3d-dd49dfb10e8e/kube-rbac-proxy/0.log" Nov 25 18:17:37 crc kubenswrapper[4800]: I1125 18:17:37.636282 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-r7gdx_74dd80b5-113a-476d-8f3d-dd49dfb10e8e/speaker/0.log" Nov 25 18:17:38 crc kubenswrapper[4800]: I1125 18:17:38.640162 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pjc8l_f37ddcfd-23c7-4052-8a17-4ea5fe5de78e/frr/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.158020 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7_435cdb5d-d3d7-4bd1-bda3-a6994c189210/util/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.370578 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7_435cdb5d-d3d7-4bd1-bda3-a6994c189210/pull/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.370645 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7_435cdb5d-d3d7-4bd1-bda3-a6994c189210/pull/0.log" Nov 25 
18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.412580 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7_435cdb5d-d3d7-4bd1-bda3-a6994c189210/util/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.572028 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7_435cdb5d-d3d7-4bd1-bda3-a6994c189210/extract/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.580332 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7_435cdb5d-d3d7-4bd1-bda3-a6994c189210/pull/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.623299 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eznjx7_435cdb5d-d3d7-4bd1-bda3-a6994c189210/util/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.745341 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vw48p_880af8be-a38e-4540-ae90-f0bf16724c49/extract-utilities/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.901427 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vw48p_880af8be-a38e-4540-ae90-f0bf16724c49/extract-content/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.930922 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vw48p_880af8be-a38e-4540-ae90-f0bf16724c49/extract-utilities/0.log" Nov 25 18:17:51 crc kubenswrapper[4800]: I1125 18:17:51.931450 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vw48p_880af8be-a38e-4540-ae90-f0bf16724c49/extract-content/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.111996 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vw48p_880af8be-a38e-4540-ae90-f0bf16724c49/extract-content/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.116420 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vw48p_880af8be-a38e-4540-ae90-f0bf16724c49/extract-utilities/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.209773 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vw48p_880af8be-a38e-4540-ae90-f0bf16724c49/registry-server/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.252564 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2fwvl_28578f18-346a-4afa-b73d-45b7faee6330/extract-utilities/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.457704 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2fwvl_28578f18-346a-4afa-b73d-45b7faee6330/extract-content/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.493874 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2fwvl_28578f18-346a-4afa-b73d-45b7faee6330/extract-utilities/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.503547 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-2fwvl_28578f18-346a-4afa-b73d-45b7faee6330/extract-content/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.719952 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2fwvl_28578f18-346a-4afa-b73d-45b7faee6330/extract-content/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.746022 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2fwvl_28578f18-346a-4afa-b73d-45b7faee6330/extract-utilities/0.log" Nov 25 18:17:52 crc kubenswrapper[4800]: I1125 18:17:52.913282 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt_e59677c5-d37c-41e3-a083-2102f5e79f5d/util/0.log" Nov 25 18:17:53 crc kubenswrapper[4800]: I1125 18:17:53.162214 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt_e59677c5-d37c-41e3-a083-2102f5e79f5d/util/0.log" Nov 25 18:17:53 crc kubenswrapper[4800]: I1125 18:17:53.191667 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt_e59677c5-d37c-41e3-a083-2102f5e79f5d/pull/0.log" Nov 25 18:17:53 crc kubenswrapper[4800]: I1125 18:17:53.221622 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt_e59677c5-d37c-41e3-a083-2102f5e79f5d/pull/0.log" Nov 25 18:17:53 crc kubenswrapper[4800]: I1125 18:17:53.469173 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt_e59677c5-d37c-41e3-a083-2102f5e79f5d/util/0.log" Nov 25 18:17:53 crc kubenswrapper[4800]: I1125 18:17:53.667792 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt_e59677c5-d37c-41e3-a083-2102f5e79f5d/extract/0.log" Nov 25 18:17:53 crc kubenswrapper[4800]: I1125 18:17:53.671179 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c692twt_e59677c5-d37c-41e3-a083-2102f5e79f5d/pull/0.log" Nov 25 18:17:53 crc kubenswrapper[4800]: I1125 18:17:53.959536 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-fhrvr_e6a5f505-c1f9-471a-b60a-97a39222f7bb/marketplace-operator/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.190470 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-w84s6_2e7bd884-1b29-4700-912b-d934384c1fec/extract-utilities/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.419310 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-w84s6_2e7bd884-1b29-4700-912b-d934384c1fec/extract-utilities/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.462414 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-w84s6_2e7bd884-1b29-4700-912b-d934384c1fec/extract-content/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.470063 4800 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-w84s6_2e7bd884-1b29-4700-912b-d934384c1fec/extract-content/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.519101 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2fwvl_28578f18-346a-4afa-b73d-45b7faee6330/registry-server/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.674892 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-w84s6_2e7bd884-1b29-4700-912b-d934384c1fec/extract-content/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.739474 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-w84s6_2e7bd884-1b29-4700-912b-d934384c1fec/extract-utilities/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.788472 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-24prz_c98cb101-07e4-44a6-972a-7d6cb9cedfe9/extract-utilities/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.950117 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-24prz_c98cb101-07e4-44a6-972a-7d6cb9cedfe9/extract-content/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.982350 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-24prz_c98cb101-07e4-44a6-972a-7d6cb9cedfe9/extract-utilities/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.993618 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-w84s6_2e7bd884-1b29-4700-912b-d934384c1fec/registry-server/0.log" Nov 25 18:17:54 crc kubenswrapper[4800]: I1125 18:17:54.998349 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-24prz_c98cb101-07e4-44a6-972a-7d6cb9cedfe9/extract-content/0.log" Nov 25 18:17:55 crc kubenswrapper[4800]: I1125 18:17:55.172012 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-24prz_c98cb101-07e4-44a6-972a-7d6cb9cedfe9/extract-utilities/0.log" Nov 25 18:17:55 crc kubenswrapper[4800]: I1125 18:17:55.202716 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-24prz_c98cb101-07e4-44a6-972a-7d6cb9cedfe9/extract-content/0.log" Nov 25 18:17:56 crc kubenswrapper[4800]: I1125 18:17:56.316815 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-24prz_c98cb101-07e4-44a6-972a-7d6cb9cedfe9/registry-server/0.log" Nov 25 18:19:12 crc kubenswrapper[4800]: I1125 18:19:12.640391 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:19:12 crc kubenswrapper[4800]: I1125 18:19:12.641598 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:19:42 crc kubenswrapper[4800]: I1125 18:19:42.640378 4800 patch_prober.go:28] interesting 
Nov 25 18:19:12 crc kubenswrapper[4800]: I1125 18:19:12.640391 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:19:12 crc kubenswrapper[4800]: I1125 18:19:12.641598 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:19:42 crc kubenswrapper[4800]: I1125 18:19:42.640378 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:19:42 crc kubenswrapper[4800]: I1125 18:19:42.641067 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.216437 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sj47c"]
Nov 25 18:20:09 crc kubenswrapper[4800]: E1125 18:20:09.217505 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="extract-content"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.217519 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="extract-content"
Nov 25 18:20:09 crc kubenswrapper[4800]: E1125 18:20:09.217540 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="extract-utilities"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.217546 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="extract-utilities"
Nov 25 18:20:09 crc kubenswrapper[4800]: E1125 18:20:09.217557 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="registry-server"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.217563 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="registry-server"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.217771 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a7b373-f344-4ea3-a777-7b1d1b2deaad" containerName="registry-server"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.219134 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.238575 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sj47c"]
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.354745 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-catalog-content\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.355491 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrvcg\" (UniqueName: \"kubernetes.io/projected/a6b46672-99f7-45b8-907a-1ad0f9cac034-kube-api-access-hrvcg\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.355607 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-utilities\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.457623 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrvcg\" (UniqueName: \"kubernetes.io/projected/a6b46672-99f7-45b8-907a-1ad0f9cac034-kube-api-access-hrvcg\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.457691 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-utilities\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.457805 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-catalog-content\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.458268 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-catalog-content\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.458419 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-utilities\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.492935 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrvcg\" (UniqueName: \"kubernetes.io/projected/a6b46672-99f7-45b8-907a-1ad0f9cac034-kube-api-access-hrvcg\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c"
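The reconciler records above attach and mount three volumes for redhat-marketplace-sj47c: two pod-scoped emptyDirs (catalog-content, utilities) and the projected service-account token volume that kubelet generates (kube-api-access-hrvcg). A sketch of how those volumes would look in the pod spec, using k8s.io/api/core/v1 types; the projection sources shown (only a token) are an assumption, since the log records only the volume names:

// volumes.go - sketch of the volume layout implied by the mount records
// above; requires the k8s.io/api module.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func catalogPodVolumes() []corev1.Volume {
	return []corev1.Volume{
		// kubernetes.io/empty-dir volumes, as named in the log:
		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{
			// kubernetes.io/projected volume; the real one typically also
			// projects the CA bundle and namespace alongside the token.
			Name: "kube-api-access-hrvcg",
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					Sources: []corev1.VolumeProjection{
						{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"}},
					},
				},
			},
		},
	}
}

func main() {
	for _, v := range catalogPodVolumes() {
		fmt.Println(v.Name)
	}
}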
succeeded for volume \"kube-api-access-hrvcg\" (UniqueName: \"kubernetes.io/projected/a6b46672-99f7-45b8-907a-1ad0f9cac034-kube-api-access-hrvcg\") pod \"redhat-marketplace-sj47c\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:09 crc kubenswrapper[4800]: I1125 18:20:09.539095 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:10 crc kubenswrapper[4800]: I1125 18:20:10.071135 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sj47c"] Nov 25 18:20:10 crc kubenswrapper[4800]: I1125 18:20:10.662348 4800 generic.go:334] "Generic (PLEG): container finished" podID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerID="d03c3a662d437b0cdaa2db8cc6c7e6ce36ee6c7870cc48457ec2bd96b347197d" exitCode=0 Nov 25 18:20:10 crc kubenswrapper[4800]: I1125 18:20:10.662417 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sj47c" event={"ID":"a6b46672-99f7-45b8-907a-1ad0f9cac034","Type":"ContainerDied","Data":"d03c3a662d437b0cdaa2db8cc6c7e6ce36ee6c7870cc48457ec2bd96b347197d"} Nov 25 18:20:10 crc kubenswrapper[4800]: I1125 18:20:10.662492 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sj47c" event={"ID":"a6b46672-99f7-45b8-907a-1ad0f9cac034","Type":"ContainerStarted","Data":"85dd46f12ead2af6448bd0583eb9d761d5d5c5c0817ad8de6fd02e31451c8497"} Nov 25 18:20:10 crc kubenswrapper[4800]: I1125 18:20:10.675698 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:20:11 crc kubenswrapper[4800]: I1125 18:20:11.671993 4800 generic.go:334] "Generic (PLEG): container finished" podID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerID="193a4a5469689a0ffd0b295abcba6d14562850a973e6853627b2ab44e2d66a55" exitCode=0 Nov 25 18:20:11 crc kubenswrapper[4800]: I1125 18:20:11.672039 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" event={"ID":"321aa8fa-429b-4e5c-af0c-6ebb42916be3","Type":"ContainerDied","Data":"193a4a5469689a0ffd0b295abcba6d14562850a973e6853627b2ab44e2d66a55"} Nov 25 18:20:11 crc kubenswrapper[4800]: I1125 18:20:11.672981 4800 scope.go:117] "RemoveContainer" containerID="193a4a5469689a0ffd0b295abcba6d14562850a973e6853627b2ab44e2d66a55" Nov 25 18:20:11 crc kubenswrapper[4800]: I1125 18:20:11.674597 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sj47c" event={"ID":"a6b46672-99f7-45b8-907a-1ad0f9cac034","Type":"ContainerStarted","Data":"dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513"} Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.326317 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8ctvq_must-gather-t5sw5_321aa8fa-429b-4e5c-af0c-6ebb42916be3/gather/0.log" Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.640906 4800 patch_prober.go:28] interesting pod/machine-config-daemon-hvg6z container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.640987 4800 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.641040 4800 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.641926 4800 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30"} pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.642010 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerName="machine-config-daemon" containerID="cri-o://a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" gracePeriod=600 Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.685918 4800 generic.go:334] "Generic (PLEG): container finished" podID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerID="dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513" exitCode=0 Nov 25 18:20:12 crc kubenswrapper[4800]: I1125 18:20:12.685974 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sj47c" event={"ID":"a6b46672-99f7-45b8-907a-1ad0f9cac034","Type":"ContainerDied","Data":"dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513"} Nov 25 18:20:12 crc kubenswrapper[4800]: E1125 18:20:12.896750 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:20:13 crc kubenswrapper[4800]: I1125 18:20:13.700526 4800 generic.go:334] "Generic (PLEG): container finished" podID="9a80af7a-a7d6-4433-97da-7d5d015cd401" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" exitCode=0 Nov 25 18:20:13 crc kubenswrapper[4800]: I1125 18:20:13.700636 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerDied","Data":"a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30"} Nov 25 18:20:13 crc kubenswrapper[4800]: I1125 18:20:13.701063 4800 scope.go:117] "RemoveContainer" containerID="a8c4ea772f008841729175b31f60a576a8aa05aa7616cac9253311e58f50fab7" Nov 25 18:20:13 crc kubenswrapper[4800]: I1125 18:20:13.701775 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:20:13 crc kubenswrapper[4800]: E1125 18:20:13.702203 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:20:13 crc kubenswrapper[4800]: I1125 18:20:13.706911 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sj47c" event={"ID":"a6b46672-99f7-45b8-907a-1ad0f9cac034","Type":"ContainerStarted","Data":"4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45"} Nov 25 18:20:13 crc kubenswrapper[4800]: I1125 18:20:13.751757 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sj47c" podStartSLOduration=2.345800753 podStartE2EDuration="4.751730092s" podCreationTimestamp="2025-11-25 18:20:09 +0000 UTC" firstStartedPulling="2025-11-25 18:20:10.67540812 +0000 UTC m=+10971.729816602" lastFinishedPulling="2025-11-25 18:20:13.081337469 +0000 UTC m=+10974.135745941" observedRunningTime="2025-11-25 18:20:13.745600444 +0000 UTC m=+10974.800008946" watchObservedRunningTime="2025-11-25 18:20:13.751730092 +0000 UTC m=+10974.806138584" Nov 25 18:20:19 crc kubenswrapper[4800]: I1125 18:20:19.539306 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:19 crc kubenswrapper[4800]: I1125 18:20:19.539898 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:19 crc kubenswrapper[4800]: I1125 18:20:19.594648 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:19 crc kubenswrapper[4800]: I1125 18:20:19.877041 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:19 crc kubenswrapper[4800]: I1125 18:20:19.945872 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sj47c"] Nov 25 18:20:21 crc kubenswrapper[4800]: I1125 18:20:21.598075 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8ctvq/must-gather-t5sw5"] Nov 25 18:20:21 crc kubenswrapper[4800]: I1125 18:20:21.598873 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerName="copy" containerID="cri-o://fd9a565ff9720ad05ed5fda5aa4ed54194dac93e8ebce2dd3c187883c07d088d" gracePeriod=2 Nov 25 18:20:21 crc kubenswrapper[4800]: I1125 18:20:21.609772 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8ctvq/must-gather-t5sw5"] Nov 25 18:20:21 crc kubenswrapper[4800]: I1125 18:20:21.850117 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8ctvq_must-gather-t5sw5_321aa8fa-429b-4e5c-af0c-6ebb42916be3/copy/0.log" Nov 25 18:20:21 crc kubenswrapper[4800]: I1125 18:20:21.850764 4800 generic.go:334] "Generic (PLEG): container finished" podID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerID="fd9a565ff9720ad05ed5fda5aa4ed54194dac93e8ebce2dd3c187883c07d088d" exitCode=143 Nov 25 18:20:21 crc kubenswrapper[4800]: I1125 18:20:21.850975 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sj47c" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" 
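The pod_startup_latency_tracker record a few lines up is internally consistent: podStartE2EDuration is observedRunningTime minus podCreationTimestamp (4.751730092s), and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). Checking the arithmetic with the monotonic (m=) offsets from the record:

// startup.go - reproduces the podStartSLOduration figure from the
// "Observed pod startup duration" record above; values copied verbatim.
package main

import "fmt"

func main() {
	e2e := 4.751730092                        // observedRunningTime - podCreationTimestamp
	pull := 10974.135745941 - 10971.729816602 // lastFinishedPulling - firstStartedPulling (m= offsets)
	// Prints 2.345800753 (within float64 rounding), matching the log.
	fmt.Printf("podStartSLOduration = %.9f\n", e2e-pull)
}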
containerName="registry-server" containerID="cri-o://4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45" gracePeriod=2 Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.475882 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8ctvq_must-gather-t5sw5_321aa8fa-429b-4e5c-af0c-6ebb42916be3/copy/0.log" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.477370 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.571461 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhwbx\" (UniqueName: \"kubernetes.io/projected/321aa8fa-429b-4e5c-af0c-6ebb42916be3-kube-api-access-nhwbx\") pod \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.571688 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/321aa8fa-429b-4e5c-af0c-6ebb42916be3-must-gather-output\") pod \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\" (UID: \"321aa8fa-429b-4e5c-af0c-6ebb42916be3\") " Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.590106 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/321aa8fa-429b-4e5c-af0c-6ebb42916be3-kube-api-access-nhwbx" (OuterVolumeSpecName: "kube-api-access-nhwbx") pod "321aa8fa-429b-4e5c-af0c-6ebb42916be3" (UID: "321aa8fa-429b-4e5c-af0c-6ebb42916be3"). InnerVolumeSpecName "kube-api-access-nhwbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.642338 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.674315 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhwbx\" (UniqueName: \"kubernetes.io/projected/321aa8fa-429b-4e5c-af0c-6ebb42916be3-kube-api-access-nhwbx\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.775712 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrvcg\" (UniqueName: \"kubernetes.io/projected/a6b46672-99f7-45b8-907a-1ad0f9cac034-kube-api-access-hrvcg\") pod \"a6b46672-99f7-45b8-907a-1ad0f9cac034\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.775927 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-utilities\") pod \"a6b46672-99f7-45b8-907a-1ad0f9cac034\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.775988 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-catalog-content\") pod \"a6b46672-99f7-45b8-907a-1ad0f9cac034\" (UID: \"a6b46672-99f7-45b8-907a-1ad0f9cac034\") " Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.779471 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-utilities" (OuterVolumeSpecName: "utilities") pod "a6b46672-99f7-45b8-907a-1ad0f9cac034" (UID: "a6b46672-99f7-45b8-907a-1ad0f9cac034"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.782130 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6b46672-99f7-45b8-907a-1ad0f9cac034-kube-api-access-hrvcg" (OuterVolumeSpecName: "kube-api-access-hrvcg") pod "a6b46672-99f7-45b8-907a-1ad0f9cac034" (UID: "a6b46672-99f7-45b8-907a-1ad0f9cac034"). InnerVolumeSpecName "kube-api-access-hrvcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.802023 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/321aa8fa-429b-4e5c-af0c-6ebb42916be3-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "321aa8fa-429b-4e5c-af0c-6ebb42916be3" (UID: "321aa8fa-429b-4e5c-af0c-6ebb42916be3"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.805392 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6b46672-99f7-45b8-907a-1ad0f9cac034" (UID: "a6b46672-99f7-45b8-907a-1ad0f9cac034"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.860278 4800 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8ctvq_must-gather-t5sw5_321aa8fa-429b-4e5c-af0c-6ebb42916be3/copy/0.log" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.860599 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8ctvq/must-gather-t5sw5" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.860643 4800 scope.go:117] "RemoveContainer" containerID="fd9a565ff9720ad05ed5fda5aa4ed54194dac93e8ebce2dd3c187883c07d088d" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.863039 4800 generic.go:334] "Generic (PLEG): container finished" podID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerID="4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45" exitCode=0 Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.863077 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sj47c" event={"ID":"a6b46672-99f7-45b8-907a-1ad0f9cac034","Type":"ContainerDied","Data":"4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45"} Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.863104 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sj47c" event={"ID":"a6b46672-99f7-45b8-907a-1ad0f9cac034","Type":"ContainerDied","Data":"85dd46f12ead2af6448bd0583eb9d761d5d5c5c0817ad8de6fd02e31451c8497"} Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.863175 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sj47c" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.878059 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.878438 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b46672-99f7-45b8-907a-1ad0f9cac034-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.878452 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrvcg\" (UniqueName: \"kubernetes.io/projected/a6b46672-99f7-45b8-907a-1ad0f9cac034-kube-api-access-hrvcg\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.878462 4800 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/321aa8fa-429b-4e5c-af0c-6ebb42916be3-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.881443 4800 scope.go:117] "RemoveContainer" containerID="193a4a5469689a0ffd0b295abcba6d14562850a973e6853627b2ab44e2d66a55" Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.910552 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sj47c"] Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.919616 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sj47c"] Nov 25 18:20:22 crc kubenswrapper[4800]: I1125 18:20:22.980869 4800 scope.go:117] "RemoveContainer" containerID="4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45" Nov 25 18:20:23 
crc kubenswrapper[4800]: I1125 18:20:23.014309 4800 scope.go:117] "RemoveContainer" containerID="dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513" Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.035652 4800 scope.go:117] "RemoveContainer" containerID="d03c3a662d437b0cdaa2db8cc6c7e6ce36ee6c7870cc48457ec2bd96b347197d" Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.091887 4800 scope.go:117] "RemoveContainer" containerID="4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45" Nov 25 18:20:23 crc kubenswrapper[4800]: E1125 18:20:23.099882 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45\": container with ID starting with 4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45 not found: ID does not exist" containerID="4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45" Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.099912 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45"} err="failed to get container status \"4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45\": rpc error: code = NotFound desc = could not find container \"4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45\": container with ID starting with 4616d2b9a8a712abc988337d9d972655ecf56f54dd97ae5830cd40796c33ca45 not found: ID does not exist" Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.099933 4800 scope.go:117] "RemoveContainer" containerID="dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513" Nov 25 18:20:23 crc kubenswrapper[4800]: E1125 18:20:23.100379 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513\": container with ID starting with dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513 not found: ID does not exist" containerID="dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513" Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.100494 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513"} err="failed to get container status \"dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513\": rpc error: code = NotFound desc = could not find container \"dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513\": container with ID starting with dbe6bde0801feed629bd0969ca8289821e60e06283a45a5aff7b2a9232d58513 not found: ID does not exist" Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.100578 4800 scope.go:117] "RemoveContainer" containerID="d03c3a662d437b0cdaa2db8cc6c7e6ce36ee6c7870cc48457ec2bd96b347197d" Nov 25 18:20:23 crc kubenswrapper[4800]: E1125 18:20:23.101266 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d03c3a662d437b0cdaa2db8cc6c7e6ce36ee6c7870cc48457ec2bd96b347197d\": container with ID starting with d03c3a662d437b0cdaa2db8cc6c7e6ce36ee6c7870cc48457ec2bd96b347197d not found: ID does not exist" containerID="d03c3a662d437b0cdaa2db8cc6c7e6ce36ee6c7870cc48457ec2bd96b347197d" Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.101314 4800 
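The NotFound errors above are gRPC statuses from CRI-O for containers that were already deleted by an earlier RemoveContainer pass; kubelet logs them and moves on. A sketch of distinguishing that benign case with the standard grpc-go status and codes packages; the sample error text mirrors the log:

// notfound.go - classifying a CRI "container not found" error.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// alreadyRemoved reports whether err is a gRPC NotFound status, i.e. the
// container was deleted before this call reached the runtime.
func alreadyRemoved(err error) bool {
	return status.Code(err) == codes.NotFound
}

func main() {
	err := status.Error(codes.NotFound, `could not find container "4616d2b9..."`)
	if alreadyRemoved(err) {
		fmt.Println("container already gone; nothing left to delete")
	}
}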
Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.799051 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" path="/var/lib/kubelet/pods/321aa8fa-429b-4e5c-af0c-6ebb42916be3/volumes"
Nov 25 18:20:23 crc kubenswrapper[4800]: I1125 18:20:23.800600 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" path="/var/lib/kubelet/pods/a6b46672-99f7-45b8-907a-1ad0f9cac034/volumes"
Nov 25 18:20:25 crc kubenswrapper[4800]: I1125 18:20:25.785926 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30"
Nov 25 18:20:25 crc kubenswrapper[4800]: E1125 18:20:25.786921 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 18:20:35 crc kubenswrapper[4800]: I1125 18:20:35.465354 4800 scope.go:117] "RemoveContainer" containerID="ad9223e8f87496aa2373267b7e782ccb13386ad573d64ddfe3f8a07c810447fd"
Nov 25 18:20:37 crc kubenswrapper[4800]: I1125 18:20:37.786212 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30"
Nov 25 18:20:37 crc kubenswrapper[4800]: E1125 18:20:37.787038 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 18:20:49 crc kubenswrapper[4800]: I1125 18:20:49.789769 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30"
Nov 25 18:20:49 crc kubenswrapper[4800]: E1125 18:20:49.790548 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
Nov 25 18:21:04 crc kubenswrapper[4800]: I1125 18:21:04.786071 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30"
Nov 25 18:21:04 crc kubenswrapper[4800]: E1125 18:21:04.786837 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401"
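The alternating RemoveContainer / "Error syncing pod" records above are kubelet retrying the machine-config-daemon container while its restart backoff sits at the cap. The backoff is commonly described as starting at 10s and doubling per crash up to a 5m ceiling (resetting after a sustained clean run), which matches the repeated "back-off 5m0s" here. A sketch of that schedule; the constants are the documented defaults, assumed rather than read from this log:

// backoff.go - illustrative CrashLoopBackOff delay schedule.
package main

import (
	"fmt"
	"time"
)

// restartDelay returns the wait before restart attempt n, doubling from
// 10s and capping at 5m (the assumed defaults).
func restartDelay(crashes int) time.Duration {
	d := 10 * time.Second
	for i := 1; i < crashes; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("crash %d -> wait %v\n", n, restartDelay(n))
	}
	// From the 6th crash onward this prints 5m0s, matching the log.
}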
for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:21:19 crc kubenswrapper[4800]: I1125 18:21:19.804928 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:21:19 crc kubenswrapper[4800]: E1125 18:21:19.806224 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:21:33 crc kubenswrapper[4800]: I1125 18:21:33.786111 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:21:33 crc kubenswrapper[4800]: E1125 18:21:33.787005 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:21:35 crc kubenswrapper[4800]: I1125 18:21:35.535755 4800 scope.go:117] "RemoveContainer" containerID="71860c49a3e6c0d5fc60098df5a9e8a00f730608eba102976931b876a6a53a4c" Nov 25 18:21:47 crc kubenswrapper[4800]: I1125 18:21:47.786082 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:21:47 crc kubenswrapper[4800]: E1125 18:21:47.787678 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.843764 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-khgf2"] Nov 25 18:21:48 crc kubenswrapper[4800]: E1125 18:21:48.844504 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerName="extract-utilities" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844519 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerName="extract-utilities" Nov 25 18:21:48 crc kubenswrapper[4800]: E1125 18:21:48.844539 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerName="copy" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844546 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerName="copy" Nov 25 18:21:48 crc kubenswrapper[4800]: E1125 18:21:48.844568 4800 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerName="gather" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844577 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerName="gather" Nov 25 18:21:48 crc kubenswrapper[4800]: E1125 18:21:48.844586 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerName="registry-server" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844595 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerName="registry-server" Nov 25 18:21:48 crc kubenswrapper[4800]: E1125 18:21:48.844635 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerName="extract-content" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844643 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerName="extract-content" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844835 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6b46672-99f7-45b8-907a-1ad0f9cac034" containerName="registry-server" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844876 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerName="copy" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.844900 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="321aa8fa-429b-4e5c-af0c-6ebb42916be3" containerName="gather" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.848106 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.858230 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-khgf2"] Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.931204 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp569\" (UniqueName: \"kubernetes.io/projected/ba5a3982-696c-420f-bec2-51386edd1bab-kube-api-access-kp569\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.931318 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-catalog-content\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:48 crc kubenswrapper[4800]: I1125 18:21:48.931443 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-utilities\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.033239 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-utilities\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.033367 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp569\" (UniqueName: \"kubernetes.io/projected/ba5a3982-696c-420f-bec2-51386edd1bab-kube-api-access-kp569\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.033430 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-catalog-content\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.033866 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-utilities\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.033976 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-catalog-content\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.075605 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-kp569\" (UniqueName: \"kubernetes.io/projected/ba5a3982-696c-420f-bec2-51386edd1bab-kube-api-access-kp569\") pod \"redhat-operators-khgf2\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.256909 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.719791 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-khgf2"] Nov 25 18:21:49 crc kubenswrapper[4800]: I1125 18:21:49.764010 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khgf2" event={"ID":"ba5a3982-696c-420f-bec2-51386edd1bab","Type":"ContainerStarted","Data":"5e7046b1e869f88106114afe3f66b1c50939826b59afe98e7921cdbdff456f51"} Nov 25 18:21:50 crc kubenswrapper[4800]: I1125 18:21:50.775055 4800 generic.go:334] "Generic (PLEG): container finished" podID="ba5a3982-696c-420f-bec2-51386edd1bab" containerID="00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8" exitCode=0 Nov 25 18:21:50 crc kubenswrapper[4800]: I1125 18:21:50.775111 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khgf2" event={"ID":"ba5a3982-696c-420f-bec2-51386edd1bab","Type":"ContainerDied","Data":"00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8"} Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.608938 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dgmhf"] Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.611750 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.616280 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dgmhf"] Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.680785 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-catalog-content\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.681034 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-utilities\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.681142 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nlgd\" (UniqueName: \"kubernetes.io/projected/cffe2037-f39f-4564-a129-0a09810c9cdf-kube-api-access-9nlgd\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.782503 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-catalog-content\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.782595 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-utilities\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.782642 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nlgd\" (UniqueName: \"kubernetes.io/projected/cffe2037-f39f-4564-a129-0a09810c9cdf-kube-api-access-9nlgd\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.783188 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-catalog-content\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.783423 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-utilities\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.824269 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9nlgd\" (UniqueName: \"kubernetes.io/projected/cffe2037-f39f-4564-a129-0a09810c9cdf-kube-api-access-9nlgd\") pod \"community-operators-dgmhf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:51 crc kubenswrapper[4800]: I1125 18:21:51.930174 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:21:52 crc kubenswrapper[4800]: W1125 18:21:52.480812 4800 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcffe2037_f39f_4564_a129_0a09810c9cdf.slice/crio-d2c40c48534ddff9d03d68e792e5aedaa072c10a5f70356d13e0ddd076ac6217 WatchSource:0}: Error finding container d2c40c48534ddff9d03d68e792e5aedaa072c10a5f70356d13e0ddd076ac6217: Status 404 returned error can't find the container with id d2c40c48534ddff9d03d68e792e5aedaa072c10a5f70356d13e0ddd076ac6217 Nov 25 18:21:52 crc kubenswrapper[4800]: I1125 18:21:52.485951 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dgmhf"] Nov 25 18:21:52 crc kubenswrapper[4800]: I1125 18:21:52.794317 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khgf2" event={"ID":"ba5a3982-696c-420f-bec2-51386edd1bab","Type":"ContainerStarted","Data":"9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2"} Nov 25 18:21:52 crc kubenswrapper[4800]: I1125 18:21:52.795835 4800 generic.go:334] "Generic (PLEG): container finished" podID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerID="13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd" exitCode=0 Nov 25 18:21:52 crc kubenswrapper[4800]: I1125 18:21:52.795879 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgmhf" event={"ID":"cffe2037-f39f-4564-a129-0a09810c9cdf","Type":"ContainerDied","Data":"13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd"} Nov 25 18:21:52 crc kubenswrapper[4800]: I1125 18:21:52.795894 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgmhf" event={"ID":"cffe2037-f39f-4564-a129-0a09810c9cdf","Type":"ContainerStarted","Data":"d2c40c48534ddff9d03d68e792e5aedaa072c10a5f70356d13e0ddd076ac6217"} Nov 25 18:21:53 crc kubenswrapper[4800]: I1125 18:21:53.806381 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgmhf" event={"ID":"cffe2037-f39f-4564-a129-0a09810c9cdf","Type":"ContainerStarted","Data":"b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3"} Nov 25 18:21:57 crc kubenswrapper[4800]: I1125 18:21:57.849559 4800 generic.go:334] "Generic (PLEG): container finished" podID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerID="b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3" exitCode=0 Nov 25 18:21:57 crc kubenswrapper[4800]: I1125 18:21:57.849630 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgmhf" event={"ID":"cffe2037-f39f-4564-a129-0a09810c9cdf","Type":"ContainerDied","Data":"b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3"} Nov 25 18:21:57 crc kubenswrapper[4800]: I1125 18:21:57.854515 4800 generic.go:334] "Generic (PLEG): container finished" podID="ba5a3982-696c-420f-bec2-51386edd1bab" 
containerID="9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2" exitCode=0 Nov 25 18:21:57 crc kubenswrapper[4800]: I1125 18:21:57.854577 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khgf2" event={"ID":"ba5a3982-696c-420f-bec2-51386edd1bab","Type":"ContainerDied","Data":"9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2"} Nov 25 18:21:59 crc kubenswrapper[4800]: I1125 18:21:59.793259 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:21:59 crc kubenswrapper[4800]: E1125 18:21:59.794096 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:21:59 crc kubenswrapper[4800]: I1125 18:21:59.880987 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khgf2" event={"ID":"ba5a3982-696c-420f-bec2-51386edd1bab","Type":"ContainerStarted","Data":"063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba"} Nov 25 18:21:59 crc kubenswrapper[4800]: I1125 18:21:59.883534 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgmhf" event={"ID":"cffe2037-f39f-4564-a129-0a09810c9cdf","Type":"ContainerStarted","Data":"39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4"} Nov 25 18:21:59 crc kubenswrapper[4800]: I1125 18:21:59.903349 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-khgf2" podStartSLOduration=3.717273069 podStartE2EDuration="11.903328733s" podCreationTimestamp="2025-11-25 18:21:48 +0000 UTC" firstStartedPulling="2025-11-25 18:21:50.777659214 +0000 UTC m=+11071.832067696" lastFinishedPulling="2025-11-25 18:21:58.963714878 +0000 UTC m=+11080.018123360" observedRunningTime="2025-11-25 18:21:59.898696236 +0000 UTC m=+11080.953104718" watchObservedRunningTime="2025-11-25 18:21:59.903328733 +0000 UTC m=+11080.957737215" Nov 25 18:21:59 crc kubenswrapper[4800]: I1125 18:21:59.923616 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dgmhf" podStartSLOduration=2.6904249890000003 podStartE2EDuration="8.923598435s" podCreationTimestamp="2025-11-25 18:21:51 +0000 UTC" firstStartedPulling="2025-11-25 18:21:52.797602339 +0000 UTC m=+11073.852010821" lastFinishedPulling="2025-11-25 18:21:59.030775785 +0000 UTC m=+11080.085184267" observedRunningTime="2025-11-25 18:21:59.921006644 +0000 UTC m=+11080.975415136" watchObservedRunningTime="2025-11-25 18:21:59.923598435 +0000 UTC m=+11080.978006927" Nov 25 18:22:01 crc kubenswrapper[4800]: I1125 18:22:01.930405 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:22:01 crc kubenswrapper[4800]: I1125 18:22:01.930828 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:22:01 crc kubenswrapper[4800]: I1125 18:22:01.985362 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:22:09 crc kubenswrapper[4800]: I1125 18:22:09.258068 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:22:09 crc kubenswrapper[4800]: I1125 18:22:09.258367 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:22:09 crc kubenswrapper[4800]: I1125 18:22:09.310065 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:22:10 crc kubenswrapper[4800]: I1125 18:22:10.327370 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:22:10 crc kubenswrapper[4800]: I1125 18:22:10.409046 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-khgf2"] Nov 25 18:22:11 crc kubenswrapper[4800]: I1125 18:22:11.998428 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.006694 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-khgf2" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="registry-server" containerID="cri-o://063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba" gracePeriod=2 Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.055057 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dgmhf"] Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.491930 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.629962 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp569\" (UniqueName: \"kubernetes.io/projected/ba5a3982-696c-420f-bec2-51386edd1bab-kube-api-access-kp569\") pod \"ba5a3982-696c-420f-bec2-51386edd1bab\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.630112 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-utilities\") pod \"ba5a3982-696c-420f-bec2-51386edd1bab\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.630354 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-catalog-content\") pod \"ba5a3982-696c-420f-bec2-51386edd1bab\" (UID: \"ba5a3982-696c-420f-bec2-51386edd1bab\") " Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.631667 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-utilities" (OuterVolumeSpecName: "utilities") pod "ba5a3982-696c-420f-bec2-51386edd1bab" (UID: "ba5a3982-696c-420f-bec2-51386edd1bab"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.636917 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba5a3982-696c-420f-bec2-51386edd1bab-kube-api-access-kp569" (OuterVolumeSpecName: "kube-api-access-kp569") pod "ba5a3982-696c-420f-bec2-51386edd1bab" (UID: "ba5a3982-696c-420f-bec2-51386edd1bab"). InnerVolumeSpecName "kube-api-access-kp569". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.733606 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp569\" (UniqueName: \"kubernetes.io/projected/ba5a3982-696c-420f-bec2-51386edd1bab-kube-api-access-kp569\") on node \"crc\" DevicePath \"\"" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.733664 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.743722 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba5a3982-696c-420f-bec2-51386edd1bab" (UID: "ba5a3982-696c-420f-bec2-51386edd1bab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.784915 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:22:12 crc kubenswrapper[4800]: E1125 18:22:12.785311 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:22:12 crc kubenswrapper[4800]: I1125 18:22:12.836282 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba5a3982-696c-420f-bec2-51386edd1bab-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.017110 4800 generic.go:334] "Generic (PLEG): container finished" podID="ba5a3982-696c-420f-bec2-51386edd1bab" containerID="063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba" exitCode=0 Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.017171 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-khgf2" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.017161 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khgf2" event={"ID":"ba5a3982-696c-420f-bec2-51386edd1bab","Type":"ContainerDied","Data":"063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba"} Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.017240 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khgf2" event={"ID":"ba5a3982-696c-420f-bec2-51386edd1bab","Type":"ContainerDied","Data":"5e7046b1e869f88106114afe3f66b1c50939826b59afe98e7921cdbdff456f51"} Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.017271 4800 scope.go:117] "RemoveContainer" containerID="063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.017801 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dgmhf" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="registry-server" containerID="cri-o://39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4" gracePeriod=2 Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.040996 4800 scope.go:117] "RemoveContainer" containerID="9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.069454 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-khgf2"] Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.081397 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-khgf2"] Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.098638 4800 scope.go:117] "RemoveContainer" containerID="00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.278048 4800 scope.go:117] "RemoveContainer" containerID="063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba" Nov 25 18:22:13 crc kubenswrapper[4800]: E1125 18:22:13.280553 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba\": container with ID starting with 063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba not found: ID does not exist" containerID="063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.280610 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba"} err="failed to get container status \"063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba\": rpc error: code = NotFound desc = could not find container \"063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba\": container with ID starting with 063195afe562891663f0d629c4221b40842c05b36db29b9267b7b7ab385f30ba not found: ID does not exist" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.280649 4800 scope.go:117] "RemoveContainer" containerID="9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2" Nov 25 18:22:13 crc kubenswrapper[4800]: E1125 18:22:13.281207 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2\": container with ID starting with 9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2 not found: ID does not exist" containerID="9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.281249 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2"} err="failed to get container status \"9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2\": rpc error: code = NotFound desc = could not find container \"9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2\": container with ID starting with 9061fada1628cf9662212477dd8200671d92d3908bf9a9d11e4f43df0430edd2 not found: ID does not exist" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.281279 4800 scope.go:117] "RemoveContainer" containerID="00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8" Nov 25 18:22:13 crc kubenswrapper[4800]: E1125 18:22:13.281612 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8\": container with ID starting with 00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8 not found: ID does not exist" containerID="00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.281646 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8"} err="failed to get container status \"00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8\": rpc error: code = NotFound desc = could not find container \"00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8\": container with ID starting with 00d5162bbd31877d904f329c5c66d95618793aeda2cfc5d2f0524929f7a815c8 not found: ID does not exist" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.517478 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.651281 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nlgd\" (UniqueName: \"kubernetes.io/projected/cffe2037-f39f-4564-a129-0a09810c9cdf-kube-api-access-9nlgd\") pod \"cffe2037-f39f-4564-a129-0a09810c9cdf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.651327 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-utilities\") pod \"cffe2037-f39f-4564-a129-0a09810c9cdf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.651477 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-catalog-content\") pod \"cffe2037-f39f-4564-a129-0a09810c9cdf\" (UID: \"cffe2037-f39f-4564-a129-0a09810c9cdf\") " Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.652082 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-utilities" (OuterVolumeSpecName: "utilities") pod "cffe2037-f39f-4564-a129-0a09810c9cdf" (UID: "cffe2037-f39f-4564-a129-0a09810c9cdf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.657424 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cffe2037-f39f-4564-a129-0a09810c9cdf-kube-api-access-9nlgd" (OuterVolumeSpecName: "kube-api-access-9nlgd") pod "cffe2037-f39f-4564-a129-0a09810c9cdf" (UID: "cffe2037-f39f-4564-a129-0a09810c9cdf"). InnerVolumeSpecName "kube-api-access-9nlgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.705107 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cffe2037-f39f-4564-a129-0a09810c9cdf" (UID: "cffe2037-f39f-4564-a129-0a09810c9cdf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.753581 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nlgd\" (UniqueName: \"kubernetes.io/projected/cffe2037-f39f-4564-a129-0a09810c9cdf-kube-api-access-9nlgd\") on node \"crc\" DevicePath \"\"" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.753631 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.753644 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe2037-f39f-4564-a129-0a09810c9cdf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:22:13 crc kubenswrapper[4800]: I1125 18:22:13.823077 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" path="/var/lib/kubelet/pods/ba5a3982-696c-420f-bec2-51386edd1bab/volumes" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.033214 4800 generic.go:334] "Generic (PLEG): container finished" podID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerID="39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4" exitCode=0 Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.033282 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgmhf" event={"ID":"cffe2037-f39f-4564-a129-0a09810c9cdf","Type":"ContainerDied","Data":"39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4"} Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.033332 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dgmhf" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.033354 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgmhf" event={"ID":"cffe2037-f39f-4564-a129-0a09810c9cdf","Type":"ContainerDied","Data":"d2c40c48534ddff9d03d68e792e5aedaa072c10a5f70356d13e0ddd076ac6217"} Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.033382 4800 scope.go:117] "RemoveContainer" containerID="39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.064084 4800 scope.go:117] "RemoveContainer" containerID="b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.065900 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dgmhf"] Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.075887 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dgmhf"] Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.088209 4800 scope.go:117] "RemoveContainer" containerID="13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.112444 4800 scope.go:117] "RemoveContainer" containerID="39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4" Nov 25 18:22:14 crc kubenswrapper[4800]: E1125 18:22:14.113344 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4\": container with ID starting with 39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4 not found: ID does not exist" containerID="39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.113410 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4"} err="failed to get container status \"39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4\": rpc error: code = NotFound desc = could not find container \"39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4\": container with ID starting with 39fa000d1b5494a95fa18a8f6af1822769d5af18404754da7b454063d59032c4 not found: ID does not exist" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.113451 4800 scope.go:117] "RemoveContainer" containerID="b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3" Nov 25 18:22:14 crc kubenswrapper[4800]: E1125 18:22:14.116749 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3\": container with ID starting with b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3 not found: ID does not exist" containerID="b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.116795 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3"} err="failed to get container status \"b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3\": rpc error: code = NotFound desc = could not find 
container \"b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3\": container with ID starting with b4ce54f0aa4bf6ea03b956d0aed6a22b61903bb09936d2322db725c7acd2faf3 not found: ID does not exist" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.116821 4800 scope.go:117] "RemoveContainer" containerID="13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd" Nov 25 18:22:14 crc kubenswrapper[4800]: E1125 18:22:14.117402 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd\": container with ID starting with 13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd not found: ID does not exist" containerID="13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd" Nov 25 18:22:14 crc kubenswrapper[4800]: I1125 18:22:14.117508 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd"} err="failed to get container status \"13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd\": rpc error: code = NotFound desc = could not find container \"13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd\": container with ID starting with 13e71984cbcdfca1e76eac274497fac1dbd924827fd34fbfd5c546bbed7672bd not found: ID does not exist" Nov 25 18:22:15 crc kubenswrapper[4800]: I1125 18:22:15.804316 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" path="/var/lib/kubelet/pods/cffe2037-f39f-4564-a129-0a09810c9cdf/volumes" Nov 25 18:22:27 crc kubenswrapper[4800]: I1125 18:22:27.785479 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:22:27 crc kubenswrapper[4800]: E1125 18:22:27.787692 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:22:40 crc kubenswrapper[4800]: I1125 18:22:40.785706 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:22:40 crc kubenswrapper[4800]: E1125 18:22:40.786504 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:22:51 crc kubenswrapper[4800]: I1125 18:22:51.785990 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:22:51 crc kubenswrapper[4800]: E1125 18:22:51.787205 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:23:04 crc kubenswrapper[4800]: I1125 18:23:04.787114 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:23:04 crc kubenswrapper[4800]: E1125 18:23:04.787979 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:23:19 crc kubenswrapper[4800]: I1125 18:23:19.803987 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:23:19 crc kubenswrapper[4800]: E1125 18:23:19.804900 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:23:30 crc kubenswrapper[4800]: I1125 18:23:30.785425 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:23:30 crc kubenswrapper[4800]: E1125 18:23:30.786330 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:23:43 crc kubenswrapper[4800]: I1125 18:23:43.785548 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:23:43 crc kubenswrapper[4800]: E1125 18:23:43.786204 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:23:58 crc kubenswrapper[4800]: I1125 18:23:58.785320 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:23:58 crc kubenswrapper[4800]: E1125 18:23:58.786087 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" 
podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:24:11 crc kubenswrapper[4800]: I1125 18:24:11.786893 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:24:11 crc kubenswrapper[4800]: E1125 18:24:11.787832 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:24:26 crc kubenswrapper[4800]: I1125 18:24:26.786190 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:24:26 crc kubenswrapper[4800]: E1125 18:24:26.787261 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:24:38 crc kubenswrapper[4800]: I1125 18:24:38.786033 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:24:38 crc kubenswrapper[4800]: E1125 18:24:38.786766 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:24:50 crc kubenswrapper[4800]: I1125 18:24:50.786114 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:24:50 crc kubenswrapper[4800]: E1125 18:24:50.786797 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:25:01 crc kubenswrapper[4800]: I1125 18:25:01.786051 4800 scope.go:117] "RemoveContainer" containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:25:01 crc kubenswrapper[4800]: E1125 18:25:01.787286 4800 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hvg6z_openshift-machine-config-operator(9a80af7a-a7d6-4433-97da-7d5d015cd401)\"" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" podUID="9a80af7a-a7d6-4433-97da-7d5d015cd401" Nov 25 18:25:15 crc kubenswrapper[4800]: I1125 18:25:15.786641 4800 scope.go:117] "RemoveContainer" 
containerID="a5b9b30dc83904406dcb03e0b4a562cd5cb973bc4bd9874a46231280fe87aa30" Nov 25 18:25:16 crc kubenswrapper[4800]: I1125 18:25:16.624239 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hvg6z" event={"ID":"9a80af7a-a7d6-4433-97da-7d5d015cd401","Type":"ContainerStarted","Data":"6402e294c6b1af441f77481a733cc6039f709e1f923b940d20072f5b370fee63"} Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.903400 4800 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4n5v7"] Nov 25 18:25:23 crc kubenswrapper[4800]: E1125 18:25:23.904391 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="registry-server" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904407 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="registry-server" Nov 25 18:25:23 crc kubenswrapper[4800]: E1125 18:25:23.904424 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="extract-utilities" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904431 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="extract-utilities" Nov 25 18:25:23 crc kubenswrapper[4800]: E1125 18:25:23.904461 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="extract-content" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904473 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="extract-content" Nov 25 18:25:23 crc kubenswrapper[4800]: E1125 18:25:23.904489 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="registry-server" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904496 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="registry-server" Nov 25 18:25:23 crc kubenswrapper[4800]: E1125 18:25:23.904506 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="extract-content" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904514 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="extract-content" Nov 25 18:25:23 crc kubenswrapper[4800]: E1125 18:25:23.904538 4800 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="extract-utilities" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904546 4800 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="extract-utilities" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904812 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="cffe2037-f39f-4564-a129-0a09810c9cdf" containerName="registry-server" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.904854 4800 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba5a3982-696c-420f-bec2-51386edd1bab" containerName="registry-server" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.906495 4800 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:23 crc kubenswrapper[4800]: I1125 18:25:23.924201 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4n5v7"] Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.034310 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-catalog-content\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.034564 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-utilities\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.034752 4800 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcnwf\" (UniqueName: \"kubernetes.io/projected/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-kube-api-access-mcnwf\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.136700 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-utilities\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.136801 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcnwf\" (UniqueName: \"kubernetes.io/projected/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-kube-api-access-mcnwf\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.136860 4800 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-catalog-content\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.137221 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-utilities\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.137249 4800 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-catalog-content\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.158716 4800 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mcnwf\" (UniqueName: \"kubernetes.io/projected/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-kube-api-access-mcnwf\") pod \"certified-operators-4n5v7\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.231435 4800 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:24 crc kubenswrapper[4800]: I1125 18:25:24.769889 4800 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4n5v7"] Nov 25 18:25:25 crc kubenswrapper[4800]: I1125 18:25:25.721427 4800 generic.go:334] "Generic (PLEG): container finished" podID="8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" containerID="207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb" exitCode=0 Nov 25 18:25:25 crc kubenswrapper[4800]: I1125 18:25:25.721529 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n5v7" event={"ID":"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3","Type":"ContainerDied","Data":"207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb"} Nov 25 18:25:25 crc kubenswrapper[4800]: I1125 18:25:25.724645 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n5v7" event={"ID":"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3","Type":"ContainerStarted","Data":"c918758441c2da8e6298f57c87f19a2c7ed07c268f159308cf5c0e8feeba6f36"} Nov 25 18:25:25 crc kubenswrapper[4800]: I1125 18:25:25.724771 4800 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:25:26 crc kubenswrapper[4800]: I1125 18:25:26.741071 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n5v7" event={"ID":"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3","Type":"ContainerStarted","Data":"7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b"} Nov 25 18:25:27 crc kubenswrapper[4800]: I1125 18:25:27.756684 4800 generic.go:334] "Generic (PLEG): container finished" podID="8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" containerID="7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b" exitCode=0 Nov 25 18:25:27 crc kubenswrapper[4800]: I1125 18:25:27.756816 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n5v7" event={"ID":"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3","Type":"ContainerDied","Data":"7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b"} Nov 25 18:25:28 crc kubenswrapper[4800]: I1125 18:25:28.770511 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n5v7" event={"ID":"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3","Type":"ContainerStarted","Data":"6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983"} Nov 25 18:25:28 crc kubenswrapper[4800]: I1125 18:25:28.810980 4800 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4n5v7" podStartSLOduration=3.33218607 podStartE2EDuration="5.810949916s" podCreationTimestamp="2025-11-25 18:25:23 +0000 UTC" firstStartedPulling="2025-11-25 18:25:25.724538487 +0000 UTC m=+11286.778946969" lastFinishedPulling="2025-11-25 18:25:28.203302323 +0000 UTC m=+11289.257710815" observedRunningTime="2025-11-25 18:25:28.792358185 +0000 UTC m=+11289.846766697" watchObservedRunningTime="2025-11-25 
18:25:28.810949916 +0000 UTC m=+11289.865358438" Nov 25 18:25:34 crc kubenswrapper[4800]: I1125 18:25:34.232452 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:34 crc kubenswrapper[4800]: I1125 18:25:34.232996 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:34 crc kubenswrapper[4800]: I1125 18:25:34.323216 4800 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:34 crc kubenswrapper[4800]: I1125 18:25:34.875121 4800 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:34 crc kubenswrapper[4800]: I1125 18:25:34.939168 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4n5v7"] Nov 25 18:25:36 crc kubenswrapper[4800]: I1125 18:25:36.840486 4800 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4n5v7" podUID="8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" containerName="registry-server" containerID="cri-o://6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983" gracePeriod=2 Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.379699 4800 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.421158 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-utilities\") pod \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.421481 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcnwf\" (UniqueName: \"kubernetes.io/projected/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-kube-api-access-mcnwf\") pod \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.421599 4800 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-catalog-content\") pod \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\" (UID: \"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3\") " Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.422266 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-utilities" (OuterVolumeSpecName: "utilities") pod "8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" (UID: "8f78e00b-0b18-42e6-8cbf-29a3fb7406d3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.427422 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-kube-api-access-mcnwf" (OuterVolumeSpecName: "kube-api-access-mcnwf") pod "8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" (UID: "8f78e00b-0b18-42e6-8cbf-29a3fb7406d3"). InnerVolumeSpecName "kube-api-access-mcnwf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.524446 4800 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.524493 4800 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcnwf\" (UniqueName: \"kubernetes.io/projected/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-kube-api-access-mcnwf\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.853311 4800 generic.go:334] "Generic (PLEG): container finished" podID="8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" containerID="6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983" exitCode=0 Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.854234 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n5v7" event={"ID":"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3","Type":"ContainerDied","Data":"6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983"} Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.854275 4800 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n5v7" event={"ID":"8f78e00b-0b18-42e6-8cbf-29a3fb7406d3","Type":"ContainerDied","Data":"c918758441c2da8e6298f57c87f19a2c7ed07c268f159308cf5c0e8feeba6f36"} Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.854298 4800 scope.go:117] "RemoveContainer" containerID="6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.854470 4800 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4n5v7" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.878909 4800 scope.go:117] "RemoveContainer" containerID="7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.916650 4800 scope.go:117] "RemoveContainer" containerID="207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.944946 4800 scope.go:117] "RemoveContainer" containerID="6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983" Nov 25 18:25:37 crc kubenswrapper[4800]: E1125 18:25:37.945341 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983\": container with ID starting with 6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983 not found: ID does not exist" containerID="6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.945389 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983"} err="failed to get container status \"6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983\": rpc error: code = NotFound desc = could not find container \"6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983\": container with ID starting with 6bf53dcea7a3039ff480880e98f26f07a33e30c519189ab1580067397ebdb983 not found: ID does not exist" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.945408 4800 scope.go:117] "RemoveContainer" containerID="7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b" Nov 25 18:25:37 crc kubenswrapper[4800]: E1125 18:25:37.945721 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b\": container with ID starting with 7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b not found: ID does not exist" containerID="7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.945740 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b"} err="failed to get container status \"7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b\": rpc error: code = NotFound desc = could not find container \"7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b\": container with ID starting with 7b44adeb192f4bed1c6c2fca5ce763f1ac920c254bf380475cdb6d6215e5025b not found: ID does not exist" Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.945770 4800 scope.go:117] "RemoveContainer" containerID="207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb" Nov 25 18:25:37 crc kubenswrapper[4800]: E1125 18:25:37.946125 4800 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb\": container with ID starting with 207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb not found: ID does not exist" containerID="207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb" 
Nov 25 18:25:37 crc kubenswrapper[4800]: I1125 18:25:37.946174 4800 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb"} err="failed to get container status \"207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb\": rpc error: code = NotFound desc = could not find container \"207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb\": container with ID starting with 207dab949107ca8ce8244e6fb06777204e8ded3e31ebfd6e076f291c61cdd4eb not found: ID does not exist"
Nov 25 18:25:38 crc kubenswrapper[4800]: I1125 18:25:38.166696 4800 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" (UID: "8f78e00b-0b18-42e6-8cbf-29a3fb7406d3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:25:38 crc kubenswrapper[4800]: I1125 18:25:38.237077 4800 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 18:25:38 crc kubenswrapper[4800]: I1125 18:25:38.502237 4800 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4n5v7"]
Nov 25 18:25:38 crc kubenswrapper[4800]: I1125 18:25:38.514127 4800 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4n5v7"]
Nov 25 18:25:39 crc kubenswrapper[4800]: I1125 18:25:39.802696 4800 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f78e00b-0b18-42e6-8cbf-29a3fb7406d3" path="/var/lib/kubelet/pods/8f78e00b-0b18-42e6-8cbf-29a3fb7406d3/volumes"